author     Linus Torvalds <torvalds@linux-foundation.org>   2024-06-07 13:34:53 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-06-07 13:34:53 -0700
commit     f24b46ea10d7d2096ea1c9ce5746a0c85920ec62 (patch)
tree       7b5897d93945799da7f2636683272f1bf939d3d4
parent     e693c5026c28ab2ca1f718f66f47a6a041ec8f3a (diff)
parent     526606b0a1998b0791b42c199d53550c3ba724b5 (diff)
Merge tag 'iommu-fixes-v6.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu fixes from Joerg Roedel:
"Core:
- Make iommu-dma code recognize 'force_aperture' again
- Fix for potential NULL-ptr dereference from iommu_sva_bind_device()
return value
AMD IOMMU fixes:
- Fix lockdep splat for invalid wait context
- Add feature bit check before enabling PPR
- Make workqueue name fit into buffer
- Fix memory leak in sysfs code"
* tag 'iommu-fixes-v6.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/amd: Fix Invalid wait context issue
iommu/amd: Check EFR[EPHSup] bit before enabling PPR
iommu/amd: Fix workqueue name
iommu: Return right value in iommu_sva_bind_device()
iommu/dma: Fix domain init
iommu/amd: Fix sysfs leak in iommu init
-rw-r--r--   drivers/iommu/amd/amd_iommu.h |  3
-rw-r--r--   drivers/iommu/amd/init.c      |  9
-rw-r--r--   drivers/iommu/amd/iommu.c     | 48
-rw-r--r--   drivers/iommu/amd/ppr.c       | 25
-rw-r--r--   drivers/iommu/dma-iommu.c     |  8
-rw-r--r--   include/linux/iommu.h         |  2

6 files changed, 45 insertions, 50 deletions
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 2fde1302a584..2d5945c982bd 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -129,7 +129,8 @@ static inline int check_feature_gpt_level(void)
 static inline bool amd_iommu_gt_ppr_supported(void)
 {
 	return (check_feature(FEATURE_GT) &&
-		check_feature(FEATURE_PPR));
+		check_feature(FEATURE_PPR) &&
+		check_feature(FEATURE_EPHSUP));
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index a18e74878f68..27e293727095 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1626,8 +1626,17 @@ static void __init free_pci_segments(void)
 	}
 }
 
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+	if (iommu->iommu.dev) {
+		iommu_device_unregister(&iommu->iommu);
+		iommu_device_sysfs_remove(&iommu->iommu);
+	}
+}
+
 static void __init free_iommu_one(struct amd_iommu *iommu)
 {
+	free_sysfs(iommu);
 	free_cwwb_sem(iommu);
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 52d83730a22a..c2703599bb16 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2032,7 +2032,6 @@ static int do_attach(struct iommu_dev_data *dev_data,
 		     struct protection_domain *domain)
 {
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
-	struct pci_dev *pdev;
 	int ret = 0;
 
 	/* Update data structures */
@@ -2047,30 +2046,13 @@ static int do_attach(struct iommu_dev_data *dev_data,
 	domain->dev_iommu[iommu->index] += 1;
 	domain->dev_cnt += 1;
 
-	pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+	/* Setup GCR3 table */
 	if (pdom_is_sva_capable(domain)) {
 		ret = init_gcr3_table(dev_data, domain);
 		if (ret)
 			return ret;
-
-		if (pdev) {
-			pdev_enable_caps(pdev);
-
-			/*
-			 * Device can continue to function even if IOPF
-			 * enablement failed. Hence in error path just
-			 * disable device PRI support.
-			 */
-			if (amd_iommu_iopf_add_device(iommu, dev_data))
-				pdev_disable_cap_pri(pdev);
-		}
-	} else if (pdev) {
-		pdev_enable_cap_ats(pdev);
 	}
 
-	/* Update device table */
-	amd_iommu_dev_update_dte(dev_data, true);
-
 	return ret;
 }
 
@@ -2163,6 +2145,11 @@ static void detach_device(struct device *dev)
 
 	do_detach(dev_data);
 
+out:
+	spin_unlock(&dev_data->lock);
+
+	spin_unlock_irqrestore(&domain->lock, flags);
+
 	/* Remove IOPF handler */
 	if (ppr)
 		amd_iommu_iopf_remove_device(iommu, dev_data);
@@ -2170,10 +2157,6 @@ static void detach_device(struct device *dev)
 
 	if (dev_is_pci(dev))
 		pdev_disable_caps(to_pci_dev(dev));
-
-out:
-	spin_unlock(&dev_data->lock);
-
-	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
@@ -2485,6 +2468,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 	struct protection_domain *domain = to_pdomain(dom);
 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+	struct pci_dev *pdev;
 	int ret;
 
 	/*
@@ -2517,7 +2501,23 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	}
 #endif
 
-	iommu_completion_wait(iommu);
+	pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+	if (pdev && pdom_is_sva_capable(domain)) {
+		pdev_enable_caps(pdev);
+
+		/*
+		 * Device can continue to function even if IOPF
+		 * enablement failed. Hence in error path just
+		 * disable device PRI support.
+		 */
+		if (amd_iommu_iopf_add_device(iommu, dev_data))
+			pdev_disable_cap_pri(pdev);
+	} else if (pdev) {
+		pdev_enable_cap_ats(pdev);
+	}
+
+	/* Update device table */
+	amd_iommu_dev_update_dte(dev_data, true);
 
 	return ret;
 }
diff --git a/drivers/iommu/amd/ppr.c b/drivers/iommu/amd/ppr.c
index 091423bb8aac..7c67d69f0b8c 100644
--- a/drivers/iommu/amd/ppr.c
+++ b/drivers/iommu/amd/ppr.c
@@ -222,8 +222,7 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu)
 	if (iommu->iopf_queue)
 		return ret;
 
-	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
-		 "amdiommu-%#x-iopfq",
+	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "amdvi-%#x",
 		 PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
 
 	iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
@@ -249,40 +248,26 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
 int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
 			      struct iommu_dev_data *dev_data)
 {
-	unsigned long flags;
 	int ret = 0;
 
 	if (!dev_data->pri_enabled)
 		return ret;
 
-	raw_spin_lock_irqsave(&iommu->lock, flags);
-
-	if (!iommu->iopf_queue) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
+	if (!iommu->iopf_queue)
+		return -EINVAL;
 
 	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
 	if (ret)
-		goto out_unlock;
+		return ret;
 
 	dev_data->ppr = true;
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&iommu->lock, flags);
-	return ret;
+	return 0;
 }
 
 /* Its assumed that caller has verified that device was added to iopf queue */
 void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
 				  struct iommu_dev_data *dev_data)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&iommu->lock, flags);
-
 	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
 
 	dev_data->ppr = false;
-
-	raw_spin_unlock_irqrestore(&iommu->lock, flags);
 }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f731e4b2a417..43520e7275cc 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -686,15 +686,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
 
 	/* Check the domain allows at least some access to the device... */
 	if (map) {
-		dma_addr_t base = dma_range_map_min(map);
-		if (base > domain->geometry.aperture_end ||
+		if (dma_range_map_min(map) > domain->geometry.aperture_end ||
 		    dma_range_map_max(map) < domain->geometry.aperture_start) {
 			pr_warn("specified DMA range outside IOMMU capability\n");
 			return -EFAULT;
 		}
-		/* ...then finally give it a kicking to make sure it fits */
-		base_pfn = max(base, domain->geometry.aperture_start) >> order;
 	}
+	/* ...then finally give it a kicking to make sure it fits */
+	base_pfn = max_t(unsigned long, base_pfn,
+			 domain->geometry.aperture_start >> order);
 
 	/* start_pfn is always nonzero for an already-initialised domain */
 	mutex_lock(&cookie->mutex);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7bc8dff7cf6d..17b3f36ad843 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1533,7 +1533,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
 static inline struct iommu_sva *
 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
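The last hunk above changes the !CONFIG_IOMMU_SVA stub of iommu_sva_bind_device() to return ERR_PTR(-ENODEV) rather than NULL, matching the error convention of the real implementation. A minimal sketch of a hypothetical caller follows (not part of this series; example_bind() is an invented name used only for illustration), showing the handle being tested with IS_ERR() instead of a NULL check:

#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical caller, for illustration only. */
static int example_bind(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;

	/* With the fix, the SVA-disabled stub yields ERR_PTR(-ENODEV), never NULL */
	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... use the bound PASID via iommu_sva_get_pasid(handle) ... */

	iommu_sva_unbind_device(handle);
	return 0;
}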