author    | Wachowski, Karol <karol.wachowski@intel.com>          | 2024-01-15 14:44:28 +0100
committer | Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com> | 2024-01-22 10:27:47 +0100
commit    | 8047d36fe563bf7ee7b28a284a0892506a6000a9 (patch)
tree      | 41052f4fd31e9670433f453f7a1a728bcdf3cc19
parent    | 929acfb9c53986d5ba37cfb9e1172ad79735f8eb (diff)
accel/ivpu: Add debug prints for MMU map/unmap operations
It is a common need to be able to see IOVA/physical to VPU address
mappings, especially when debugging various kinds of memory-related
issues. The lack of such logs forces the user to modify and recompile
the KMD manually.
This commit adds those logs under an MMU debug mask that can be enabled
dynamically with a module parameter at KMD load time.
Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240115134434.493839-4-jacek.lawrynowicz@linux.intel.com
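For context, the "MMU debug mask" mentioned in the commit message is a runtime bitmask that the driver exposes as a module parameter. The snippet below is a minimal, hypothetical sketch of that mechanism (the parameter and variable names are assumptions, not taken from this patch); it only illustrates how a bit such as IVPU_DBG_MMU_MAP == BIT(13) == 0x2000 would be enabled at module load time or, if the permissions allow, changed later through sysfs.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hypothetical, minimal sketch (not the driver's actual code) of a debug
 * mask exposed as a module parameter. With something like this in place,
 * a bit such as IVPU_DBG_MMU_MAP (BIT(13) == 0x2000) can be set at load
 * time, e.g. "modprobe <module> dbg_mask=0x2000", or adjusted later via
 * /sys/module/<module>/parameters/dbg_mask.
 */
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int dbg_mask;
module_param(dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Debug mask, see IVPU_DBG_* style bit macros");

static int __init dbg_mask_demo_init(void)
{
	/* Report whether the MMU map/unmap debug bit was requested. */
	pr_info("dbg_mask_demo: MMU map/unmap prints %s\n",
		(dbg_mask & BIT(13)) ? "enabled" : "disabled");
	return 0;
}

static void __exit dbg_mask_demo_exit(void)
{
}

module_init(dbg_mask_demo_init);
module_exit(dbg_mask_demo_exit);
MODULE_LICENSE("GPL");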
-rw-r--r-- | drivers/accel/ivpu/ivpu_drv.h         | 1
-rw-r--r-- | drivers/accel/ivpu/ivpu_mmu_context.c | 9
2 files changed, 10 insertions, 0 deletions
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index ebc4b84f27b2..9b6e336626e3 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -56,6 +56,7 @@
 #define IVPU_DBG_JSM BIT(10)
 #define IVPU_DBG_KREF BIT(11)
 #define IVPU_DBG_RPM BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
 
 #define ivpu_err(vdev, fmt, ...) \
 	drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 12a8c09d4547..fe6161299236 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
 		if (ret) {
 			ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
 	mutex_lock(&ctx->lock);
 
 	for_each_sgtable_dma_sg(sgt, sg, i) {
+		dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
 		size_t size = sg_dma_len(sg) + sg->offset;
 
+		ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+			 ctx->id, dma_addr, vpu_addr, size);
+
 		ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
 		vpu_addr += size;
 	}
 
 	/* Ensure page table modifications are flushed from wc buffers to memory */
 	wmb();
+
 	mutex_unlock(&ctx->lock);
 
 	ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
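The new prints go through ivpu_dbg(vdev, MMU_MAP, ...), gated by the IVPU_DBG_MMU_MAP bit added in ivpu_drv.h. The ivpu_dbg() macro body itself is not part of this patch, so the stand-alone, userspace-compilable sketch below only approximates the pattern under that assumption: the type argument is pasted onto an IVPU_DBG_ prefix and the message is printed only when the corresponding bit is set in the runtime debug mask.

/*
 * Illustrative userspace sketch of a mask-gated debug print in the style
 * of ivpu_dbg(vdev, MMU_MAP, ...). The macro body here is an assumption
 * for illustration, not copied from ivpu_drv.h; the addresses are dummy
 * values, not real driver output.
 */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define IVPU_DBG_MMU_MAP	BIT(13)

static unsigned int dbg_mask = BIT(13);	/* pretend the module parameter set this */

#define demo_dbg(type, fmt, ...)					\
	do {								\
		if (IVPU_DBG_##type & dbg_mask)				\
			printf("[%s] " fmt, #type, ##__VA_ARGS__);	\
	} while (0)

int main(void)
{
	unsigned long long dma_addr = 0x12345000ull;
	unsigned long long vpu_addr = 0x80000000ull;
	unsigned long size = 4096;

	/* Mirrors the shape of the "Map ..." print added by this patch. */
	demo_dbg(MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
		 1u, dma_addr, vpu_addr, size);
	return 0;
}

With bit 13 cleared in dbg_mask, the same call compiles to a no-op branch and nothing is printed, which is what keeps the per-buffer map/unmap logging free when it is not requested.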