iommu: Avoid more races around device probe
commit a2e7e59a94
upstream. It turns out there are more subtle races beyond just the main part of __iommu_probe_device() itself running in parallel - the dev_iommu_free() on the way out of an unsuccessful probe can still manage to trip up concurrent accesses to a device's fwspec. Thus, extend the scope of iommu_probe_device_lock() to also serialise fwspec creation and initial retrieval. Reported-by: Zhenhua Huang <quic_zhenhuah@quicinc.com> Link: https://lore.kernel.org/linux-iommu/e2e20e1c-6450-4ac5-9804-b0000acdf7de@quicinc.com/ Fixes: 01657bc14a
("iommu: Avoid races around device probe") Signed-off-by: Robin Murphy <robin.murphy@arm.com> Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Reviewed-by: André Draszik <andre.draszik@linaro.org> Tested-by: André Draszik <andre.draszik@linaro.org> Link: https://lore.kernel.org/r/16f433658661d7cadfea51e7c65da95826112a2b.1700071477.git.robin.murphy@arm.com Cc: stable@vger.kernel.org Signed-off-by: Joerg Roedel <jroedel@suse.de> Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
3c796895b4
commit
68bc7b2003
|
@ -1563,17 +1563,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
|
||||||
int err;
|
int err;
|
||||||
const struct iommu_ops *ops;
|
const struct iommu_ops *ops;
|
||||||
|
|
||||||
|
/* Serialise to make dev->iommu stable under our potential fwspec */
|
||||||
|
mutex_lock(&iommu_probe_device_lock);
|
||||||
/*
|
/*
|
||||||
* If we already translated the fwspec there is nothing left to do,
|
* If we already translated the fwspec there is nothing left to do,
|
||||||
* return the iommu_ops.
|
* return the iommu_ops.
|
||||||
*/
|
*/
|
||||||
ops = acpi_iommu_fwspec_ops(dev);
|
ops = acpi_iommu_fwspec_ops(dev);
|
||||||
if (ops)
|
if (ops) {
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
return ops;
|
return ops;
|
||||||
|
}
|
||||||
|
|
||||||
err = iort_iommu_configure_id(dev, id_in);
|
err = iort_iommu_configure_id(dev, id_in);
|
||||||
if (err && err != -EPROBE_DEFER)
|
if (err && err != -EPROBE_DEFER)
|
||||||
err = viot_iommu_configure(dev);
|
err = viot_iommu_configure(dev);
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we have reason to believe the IOMMU driver missed the initial
|
* If we have reason to believe the IOMMU driver missed the initial
|
||||||
|
|
|
@ -278,12 +278,13 @@ static void dev_iommu_free(struct device *dev)
|
||||||
kfree(param);
|
kfree(param);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
DEFINE_MUTEX(iommu_probe_device_lock);
|
||||||
|
|
||||||
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
|
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
|
||||||
{
|
{
|
||||||
const struct iommu_ops *ops = dev->bus->iommu_ops;
|
const struct iommu_ops *ops = dev->bus->iommu_ops;
|
||||||
struct iommu_device *iommu_dev;
|
struct iommu_device *iommu_dev;
|
||||||
struct iommu_group *group;
|
struct iommu_group *group;
|
||||||
static DEFINE_MUTEX(iommu_probe_device_lock);
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!ops)
|
if (!ops)
|
||||||
|
@ -295,11 +296,9 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
|
||||||
* probably be able to use device_lock() here to minimise the scope,
|
* probably be able to use device_lock() here to minimise the scope,
|
||||||
* but for now enforcing a simple global ordering is fine.
|
* but for now enforcing a simple global ordering is fine.
|
||||||
*/
|
*/
|
||||||
mutex_lock(&iommu_probe_device_lock);
|
lockdep_assert_held(&iommu_probe_device_lock);
|
||||||
if (!dev_iommu_get(dev)) {
|
if (!dev_iommu_get(dev))
|
||||||
ret = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto err_unlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!try_module_get(ops->owner)) {
|
if (!try_module_get(ops->owner)) {
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
@ -326,7 +325,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
|
||||||
mutex_unlock(&group->mutex);
|
mutex_unlock(&group->mutex);
|
||||||
iommu_group_put(group);
|
iommu_group_put(group);
|
||||||
|
|
||||||
mutex_unlock(&iommu_probe_device_lock);
|
|
||||||
iommu_device_link(iommu_dev, dev);
|
iommu_device_link(iommu_dev, dev);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -341,9 +339,6 @@ out_module_put:
|
||||||
err_free:
|
err_free:
|
||||||
dev_iommu_free(dev);
|
dev_iommu_free(dev);
|
||||||
|
|
||||||
err_unlock:
|
|
||||||
mutex_unlock(&iommu_probe_device_lock);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -353,7 +348,9 @@ int iommu_probe_device(struct device *dev)
|
||||||
struct iommu_group *group;
|
struct iommu_group *group;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
mutex_lock(&iommu_probe_device_lock);
|
||||||
ret = __iommu_probe_device(dev, NULL);
|
ret = __iommu_probe_device(dev, NULL);
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err_out;
|
goto err_out;
|
||||||
|
|
||||||
|
@ -1684,7 +1681,9 @@ static int probe_iommu_group(struct device *dev, void *data)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mutex_lock(&iommu_probe_device_lock);
|
||||||
ret = __iommu_probe_device(dev, group_list);
|
ret = __iommu_probe_device(dev, group_list);
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
if (ret == -ENODEV)
|
if (ret == -ENODEV)
|
||||||
ret = 0;
|
ret = 0;
|
||||||
|
|
||||||
|
|
|
@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||||
const u32 *id)
|
const u32 *id)
|
||||||
{
|
{
|
||||||
const struct iommu_ops *ops = NULL;
|
const struct iommu_ops *ops = NULL;
|
||||||
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
|
struct iommu_fwspec *fwspec;
|
||||||
int err = NO_IOMMU;
|
int err = NO_IOMMU;
|
||||||
|
|
||||||
if (!master_np)
|
if (!master_np)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
/* Serialise to make dev->iommu stable under our potential fwspec */
|
||||||
|
mutex_lock(&iommu_probe_device_lock);
|
||||||
|
fwspec = dev_iommu_fwspec_get(dev);
|
||||||
if (fwspec) {
|
if (fwspec) {
|
||||||
if (fwspec->ops)
|
if (fwspec->ops) {
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
return fwspec->ops;
|
return fwspec->ops;
|
||||||
|
}
|
||||||
/* In the deferred case, start again from scratch */
|
/* In the deferred case, start again from scratch */
|
||||||
iommu_fwspec_free(dev);
|
iommu_fwspec_free(dev);
|
||||||
}
|
}
|
||||||
|
@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||||
fwspec = dev_iommu_fwspec_get(dev);
|
fwspec = dev_iommu_fwspec_get(dev);
|
||||||
ops = fwspec->ops;
|
ops = fwspec->ops;
|
||||||
}
|
}
|
||||||
|
mutex_unlock(&iommu_probe_device_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we have reason to believe the IOMMU driver missed the initial
|
* If we have reason to believe the IOMMU driver missed the initial
|
||||||
* probe for dev, replay it to get things in order.
|
* probe for dev, replay it to get things in order.
|
||||||
|
|
|
@ -657,6 +657,7 @@ static inline void dev_iommu_priv_set(struct device *dev, void *priv)
|
||||||
dev->iommu->priv = priv;
|
dev->iommu->priv = priv;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
extern struct mutex iommu_probe_device_lock;
|
||||||
int iommu_probe_device(struct device *dev);
|
int iommu_probe_device(struct device *dev);
|
||||||
void iommu_release_device(struct device *dev);
|
void iommu_release_device(struct device *dev);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue