author		Pasha Tatashin <pasha.tatashin@soleen.com>	2024-04-13 00:25:12 +0000
committer	Joerg Roedel <jroedel@suse.de>	2024-04-15 14:31:40 +0200
commit		06c375053cefc3a2f383d200596abe5ab3fb35f9 (patch)
tree		39803ef6a16135038b116b43b263d7272f55436d /drivers/iommu/intel/svm.c
parent		0bbac3facb5d6cc0171c45c9873a2dc96bea9680 (diff)
iommu/vt-d: add wrapper functions for page allocations
In order to improve observability and accountability of the IOMMU layer, we must account for the number of pages that are allocated by functions that call directly into the buddy allocator.

This is achieved by first wrapping the allocation-related functions in separate inline functions in a new file: drivers/iommu/iommu-pages.h.

Convert all page allocation calls under iommu/intel to use these new functions.

Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: Bagas Sanjaya <bagasdotme@gmail.com>
Link: https://lore.kernel.org/r/20240413002522.1101315-2-pasha.tatashin@soleen.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
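[Editor's note: for context, here is a minimal sketch of the kind of inline wrapper iommu-pages.h introduces. The function names are taken from the diff below; the folded-in zeroing and the accounting hook are assumptions inferred from the call-site changes, not the exact upstream code.]

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
	{
		/* Zeroing is folded into the wrapper, so callers drop __GFP_ZERO. */
		struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);

		if (!page)
			return NULL;

		/* Per-allocation accounting of 1 << order pages would hook in here. */
		return page_address(page);
	}

	static inline void iommu_free_pages(void *virt, int order)
	{
		if (!virt)
			return;

		/* The matching accounting decrement would go here. */
		free_pages((unsigned long)virt, order);
	}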
Diffstat (limited to 'drivers/iommu/intel/svm.c')
-rw-r--r--	drivers/iommu/intel/svm.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ee3b469e2da1..71c1b2a0ca16 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -22,6 +22,7 @@
 #include "iommu.h"
 #include "pasid.h"
 #include "perf.h"
+#include "../iommu-pages.h"
 #include "trace.h"
 
 static irqreturn_t prq_event_thread(int irq, void *d);
@@ -63,16 +64,14 @@ svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
 int intel_svm_enable_prq(struct intel_iommu *iommu)
 {
 	struct iopf_queue *iopfq;
-	struct page *pages;
 	int irq, ret;
 
-	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
-	if (!pages) {
+	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+	if (!iommu->prq) {
 		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
 			iommu->name);
 		return -ENOMEM;
 	}
-	iommu->prq = page_address(pages);
 
 	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
 	if (irq <= 0) {
@@ -117,7 +116,7 @@ free_hwirq:
 	dmar_free_hwirq(irq);
 	iommu->pr_irq = 0;
 free_prq:
-	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+	iommu_free_pages(iommu->prq, PRQ_ORDER);
 	iommu->prq = NULL;
 
 	return ret;
@@ -140,7 +139,7 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 		iommu->iopf_queue = NULL;
 	}
 
-	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
+	iommu_free_pages(iommu->prq, PRQ_ORDER);
 	iommu->prq = NULL;
 
 	return 0;
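[Editor's note: the caller-side effect is easiest to see in intel_svm_enable_prq() above: the wrapper returns a virtual address directly, so the struct page local and the page_address() call disappear, and the dropped __GFP_ZERO at the call site suggests the wrapper zeroes the allocation itself. A side-by-side sketch, condensed from the hunks above:]

	/* Before: the caller juggles struct page and zeroing explicitly. */
	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages)
		return -ENOMEM;
	iommu->prq = page_address(pages);

	/* After: one wrapper call yields a (presumably zeroed) virtual address. */
	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
	if (!iommu->prq)
		return -ENOMEM;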