author      Joel Granados <joel.granados@kernel.org>   2024-11-04 09:40:34 +0800
committer   Joerg Roedel <jroedel@suse.de>             2024-11-05 13:32:24 +0100
commit      4d5440957641fb5652cbef8df6183baf473cab6b
tree        e831adf6dace43b86bf3247aa581c658c133db0c /drivers/iommu/intel/iommu.c
parent      f1645676f25d2c846798f0233c3a953efd62aafb
iommu/vt-d: Separate page request queue from SVM
IO page faults are no longer dependent on CONFIG_INTEL_IOMMU_SVM. Move
all Page Request Queue (PRQ) functions that handle PRQ events to a new
file, drivers/iommu/intel/prq.c. The page_req_dsc struct is now
declared in drivers/iommu/intel/prq.c.
No functional changes are intended. This is a preparation patch to
enable the use of IO page faults outside the SVM/PASID use cases.
Signed-off-by: Joel Granados <joel.granados@kernel.org>
Link: https://lore.kernel.org/r/20241015-jag-iopfv8-v4-1-b696ca89ba29@kernel.org
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r--    drivers/iommu/intel/iommu.c    20
1 file changed, 7 insertions, 13 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5095147f6ba2..d0c325115b45 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1329,12 +1329,10 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
         /* free context mapping */
         free_context_table(iommu);
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
         if (pasid_supported(iommu)) {
                 if (ecap_prs(iommu->ecap))
-                        intel_svm_finish_prq(iommu);
+                        intel_iommu_finish_prq(iommu);
         }
-#endif
 }
 
 /*
@@ -2194,19 +2192,18 @@ static int __init init_dmars(void)
 
                 iommu_flush_write_buffer(iommu);
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
                 if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
                         /*
                          * Call dmar_alloc_hwirq() with dmar_global_lock held,
                          * could cause possible lock race condition.
                          */
                         up_write(&dmar_global_lock);
-                        ret = intel_svm_enable_prq(iommu);
+                        ret = intel_iommu_enable_prq(iommu);
                         down_write(&dmar_global_lock);
                         if (ret)
                                 goto free_iommu;
                 }
-#endif
+
                 ret = dmar_set_interrupt(iommu);
                 if (ret)
                         goto free_iommu;
@@ -2619,13 +2616,12 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
         intel_iommu_init_qi(iommu);
         iommu_flush_write_buffer(iommu);
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
         if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
-                ret = intel_svm_enable_prq(iommu);
+                ret = intel_iommu_enable_prq(iommu);
                 if (ret)
                         goto disable_iommu;
         }
-#endif
+
         ret = dmar_set_interrupt(iommu);
         if (ret)
                 goto disable_iommu;
@@ -4072,7 +4068,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
         intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
         kfree(dev_pasid);
         intel_pasid_tear_down_entry(iommu, dev, pasid, false);
-        intel_drain_pasid_prq(dev, pasid);
+        intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
@@ -4415,9 +4411,7 @@ const struct iommu_ops intel_iommu_ops = {
         .def_domain_type = device_def_domain_type,
         .remove_dev_pasid = intel_iommu_remove_dev_pasid,
         .pgsize_bitmap = SZ_4K,
-#ifdef CONFIG_INTEL_IOMMU_SVM
-        .page_response = intel_svm_page_response,
-#endif
+        .page_response = intel_iommu_page_response,
         .default_domain_ops = &(const struct iommu_domain_ops) {
                 .attach_dev = intel_iommu_attach_device,
                 .set_dev_pasid = intel_iommu_set_dev_pasid,
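The net effect of the renames above is that iommu.c no longer needs any CONFIG_INTEL_IOMMU_SVM guards around page-request handling; it calls into the relocated PRQ code unconditionally. Below is a minimal sketch of the interface those call sites imply. The function names are taken directly from the diff, but the return types and parameter types are assumptions inferred from how the functions are called here, not copied from the patch.

    /* Hypothetical declarations for the PRQ code moved to prq.c. */
    #include <linux/types.h>

    struct intel_iommu;
    struct device;
    struct iopf_fault;
    struct iommu_page_response;

    /* Allocate the page request queue and hook up its interrupt. */
    int intel_iommu_enable_prq(struct intel_iommu *iommu);

    /* Undo intel_iommu_enable_prq() when an IOMMU is torn down. */
    void intel_iommu_finish_prq(struct intel_iommu *iommu);

    /* Wait for outstanding page requests of a PASID to drain. */
    void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);

    /* Handler installed as the iommu_ops.page_response callback. */
    void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
                                   struct iommu_page_response *msg);

With declarations like these visible regardless of configuration, the #ifdef CONFIG_INTEL_IOMMU_SVM blocks removed in the hunks above are no longer needed, which is exactly what the diff shows.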