author	Robin Murphy <robin.murphy@arm.com>	2023-11-15 18:25:44 +0000
committer	Joerg Roedel <jroedel@suse.de>	2023-11-27 10:53:55 +0100
commit	a2e7e59a94269484a83386972ca07c22fd188854 (patch)
tree	342d6c7af571cd2c96c740b3b125097c487d92c1 /drivers/iommu/iommu.c
parent	a99583e2aff64baf27b04b7d3a0341a52bf8e047 (diff)
iommu: Avoid more races around device probe
It turns out there are more subtle races beyond just the main part of
__iommu_probe_device() itself running in parallel - the dev_iommu_free()
on the way out of an unsuccessful probe can still manage to trip up
concurrent accesses to a device's fwspec. Thus, extend the scope of
iommu_probe_device_lock() to also serialise fwspec creation and initial
retrieval.

Reported-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Link: https://lore.kernel.org/linux-iommu/e2e20e1c-6450-4ac5-9804-b0000acdf7de@quicinc.com/
Fixes: 01657bc14a39 ("iommu: Avoid races around device probe")
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: André Draszik <andre.draszik@linaro.org>
Tested-by: André Draszik <andre.draszik@linaro.org>
Link: https://lore.kernel.org/r/16f433658661d7cadfea51e7c65da95826112a2b.1700071477.git.robin.murphy@arm.com
Cc: stable@vger.kernel.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
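For context, a minimal sketch of the locking pattern this change establishes, based only on the hunks below: iommu_probe_device_lock becomes a file-scope mutex that callers take around __iommu_probe_device() (which now merely asserts the lock is held), so paths that create or read a device's fwspec can serialise against probe, and against a failing probe's dev_iommu_free(), by taking the same mutex. The function name example_setup_fwspec and the extern declaration are illustrative assumptions; the real declaration is added outside this file's diff.

/*
 * Sketch only, not part of this patch: serialising fwspec creation
 * against a concurrent (and possibly failing) probe by holding the
 * now-global probe lock.
 */
#include <linux/iommu.h>
#include <linux/mutex.h>

extern struct mutex iommu_probe_device_lock;	/* assumed to be exposed by an IOMMU header */

static int example_setup_fwspec(struct device *dev,
				struct fwnode_handle *iommu_fwnode,
				const struct iommu_ops *ops)
{
	int ret;

	mutex_lock(&iommu_probe_device_lock);
	/* fwspec is created under the same lock that the probe path holds */
	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	mutex_unlock(&iommu_probe_device_lock);

	return ret;
}

This mirrors what the two callers in the diff now do around __iommu_probe_device().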
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--	drivers/iommu/iommu.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c9a05bb49bfa..33e2a9b5d339 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev)
 	dev_iommu_free(dev);
 }
 
+DEFINE_MUTEX(iommu_probe_device_lock);
+
 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 	struct iommu_group *group;
-	static DEFINE_MUTEX(iommu_probe_device_lock);
 	struct group_device *gdev;
 	int ret;
 
@@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 	 * probably be able to use device_lock() here to minimise the scope,
 	 * but for now enforcing a simple global ordering is fine.
 	 */
-	mutex_lock(&iommu_probe_device_lock);
+	lockdep_assert_held(&iommu_probe_device_lock);
 
 	/* Device is probed already if in a group */
-	if (dev->iommu_group) {
-		ret = 0;
-		goto out_unlock;
-	}
+	if (dev->iommu_group)
+		return 0;
 
 	ret = iommu_init_device(dev, ops);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	group = dev->iommu_group;
 	gdev = iommu_group_alloc_device(group, dev);
@@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 		list_add_tail(&group->entry, group_list);
 	}
 	mutex_unlock(&group->mutex);
-	mutex_unlock(&iommu_probe_device_lock);
 
 	if (dev_is_pci(dev))
 		iommu_dma_set_pci_32bit_workaround(dev);
@@ -562,8 +560,6 @@ err_put_group:
 	iommu_deinit_device(dev);
 	mutex_unlock(&group->mutex);
 	iommu_group_put(group);
-out_unlock:
-	mutex_unlock(&iommu_probe_device_lock);
 
 	return ret;
 }
@@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev)
 	const struct iommu_ops *ops;
 	int ret;
 
+	mutex_lock(&iommu_probe_device_lock);
 	ret = __iommu_probe_device(dev, NULL);
+	mutex_unlock(&iommu_probe_device_lock);
 	if (ret)
 		return ret;
 
@@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data)
 	struct list_head *group_list = data;
 	int ret;
 
+	mutex_lock(&iommu_probe_device_lock);
 	ret = __iommu_probe_device(dev, group_list);
+	mutex_unlock(&iommu_probe_device_lock);
 	if (ret == -ENODEV)
 		ret = 0;