commit     61e3270ef9610ab40b1b56aa62e2b8471c32f1f7
author     Matthew Brost <matthew.brost@intel.com>  2024-04-24 21:55:09 -0700
committer  Matthew Brost <matthew.brost@intel.com>  2024-04-26 12:10:04 -0700
tree       09fcc73024e33c5f011ddc2d988c025d32906955  /drivers/gpu/drm/xe/xe_vm.c
parent     22cfdd286572decf5225cc219205ca3348cfc4af
drm/xe: Add vm_bind_ioctl_ops_fini helper
Simplify the VM bind code by signaling out-fences and destroying VMAs in a
single location. This will help with the transition to using a single job
for many bind ops.
v2:
- s/vm_bind_ioctl_ops_install_fences/vm_bind_ioctl_ops_fini (Oak)
- Set last fence in vm_bind_ioctl_ops_fini (Oak)
Cc: Oak Zeng <oak.zeng@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240425045513.1913039-10-matthew.brost@intel.com
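For context, the repeated pattern this patch removes from each bind/unbind path looked roughly like the fragment below. This is a simplified composite of the removed hunks that follow, not a verbatim quote; variable names are as in xe_vm.c.

	/* Before this patch: repeated (with small variations) at the end of
	 * xe_vm_unbind_vma(), xe_vm_bind_vma(), xe_vm_bind() and
	 * xe_vm_prefetch(). Simplified from the removed hunks below.
	 */
	if (last_op) {
		for (i = 0; i < num_syncs; i++)
			xe_sync_entry_signal(&syncs[i], fence);
	}
	if (last_op)
		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);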
Diffstat (limited to 'drivers/gpu/drm/xe/xe_vm.c')
 drivers/gpu/drm/xe/xe_vm.c | 62 ++++++++++++++++++++++++--------------------------------------
 1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 45258d38d4ee..2f19372aaad5 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1646,7 +1646,7 @@ xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	struct dma_fence *fence = NULL;
 	struct dma_fence **fences = NULL;
 	struct dma_fence_array *cf = NULL;
-	int cur_fence = 0, i;
+	int cur_fence = 0;
 	int number_tiles = hweight8(vma->tile_present);
 	int err;
 	u8 id;
@@ -1704,10 +1704,6 @@ next:
 
 	fence = cf ? &cf->base : !fence ?
 		xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;
-	if (last_op) {
-		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i], fence);
-	}
 
 	return fence;
 
@@ -1731,7 +1727,7 @@ xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
 	struct dma_fence **fences = NULL;
 	struct dma_fence_array *cf = NULL;
 	struct xe_vm *vm = xe_vma_vm(vma);
-	int cur_fence = 0, i;
+	int cur_fence = 0;
 	int number_tiles = hweight8(tile_mask);
 	int err;
 	u8 id;
@@ -1778,12 +1774,6 @@ next:
 		}
 	}
 
-	if (last_op) {
-		for (i = 0; i < num_syncs; i++)
-			xe_sync_entry_signal(&syncs[i],
-					     cf ? &cf->base : fence);
-	}
-
 	return cf ? &cf->base : fence;
 
 err_fences:
@@ -1835,20 +1825,11 @@ xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
 		if (IS_ERR(fence))
 			return fence;
 	} else {
-		int i;
-
 		xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
 
 		fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-		if (last_op) {
-			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], fence);
-		}
 	}
 
-	if (last_op)
-		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
 	return fence;
 }
 
@@ -1858,7 +1839,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	     u32 num_syncs, bool first_op, bool last_op)
 {
 	struct dma_fence *fence;
-	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
 
 	xe_vm_assert_held(vm);
 	xe_bo_assert_held(xe_vma_bo(vma));
@@ -1867,10 +1847,6 @@ xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
 	if (IS_ERR(fence))
 		return fence;
 
-	xe_vma_destroy(vma, fence);
-	if (last_op)
-		xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
-
 	return fence;
 }
 
@@ -2025,17 +2001,7 @@ xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
 		return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
 				  vma->tile_mask, true, first_op, last_op);
 	} else {
-		struct dma_fence *fence =
-			xe_exec_queue_last_fence_get(wait_exec_queue, vm);
-		int i;
-
-		/* Nothing to do, signal fences now */
-		if (last_op) {
-			for (i = 0; i < num_syncs; i++)
-				xe_sync_entry_signal(&syncs[i], fence);
-		}
-
-		return fence;
+		return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
 	}
 }
 
@@ -2838,6 +2804,26 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
 	return fence;
 }
 
+static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
+				   struct dma_fence *fence)
+{
+	struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
+	struct xe_vma_op *op;
+	int i;
+
+	list_for_each_entry(op, &vops->list, link) {
+		if (op->base.op == DRM_GPUVA_OP_UNMAP)
+			xe_vma_destroy(gpuva_to_vma(op->base.unmap.va), fence);
+		else if (op->base.op == DRM_GPUVA_OP_REMAP)
+			xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va),
+				       fence);
+	}
+	for (i = 0; i < vops->num_syncs; i++)
+		xe_sync_entry_signal(vops->syncs + i, fence);
+	xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+	dma_fence_put(fence);
+}
+
 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 				     struct xe_vma_ops *vops)
 {
@@ -2862,7 +2848,7 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 			xe_vm_kill(vm, false);
 			goto unlock;
 		} else {
-			dma_fence_put(fence);
+			vm_bind_ioctl_ops_fini(vm, vops, fence);
 		}
 	}
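Taken together, the hunks route every post-execution step through the new helper. A minimal sketch of the resulting ioctl flow, distilled from the hunks above; the locking, retry, and unlock/unwind details of the real vm_bind_ioctl_ops_execute() are elided here, so the exact error handling shown is an assumption rather than the verbatim function:

	/*
	 * Illustrative sketch only -- not the verbatim function. Lock
	 * retries and the unlock/unwind path are omitted.
	 */
	static int vm_bind_ioctl_ops_execute_sketch(struct xe_vm *vm,
						    struct xe_vma_ops *vops)
	{
		struct dma_fence *fence;

		fence = ops_execute(vm, vops);	/* run all queued bind ops */
		if (IS_ERR(fence)) {
			xe_vm_kill(vm, false);	/* error path kills the VM */
			return PTR_ERR(fence);
		}

		/*
		 * Single cleanup point: destroy VMAs removed by UNMAP/REMAP
		 * ops, signal every out-fence, record the last fence on the
		 * wait exec queue, and drop the ops_execute() reference.
		 */
		vm_bind_ioctl_ops_fini(vm, vops, fence);
		return 0;
	}

Note that vm_bind_ioctl_ops_fini() consumes the fence reference (the dma_fence_put() at its end), which is why the caller's explicit dma_fence_put(fence) disappears in the final hunk.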