summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorMatthew Brost <matthew.brost@intel.com>2023-11-20 12:08:48 -0800
committerRodrigo Vivi <rodrigo.vivi@intel.com>2023-12-21 11:44:51 -0500
commit40709aa761acbc78fe6c0405720d79cbf8345095 (patch)
tree5e7c11bf31579a6ce05f227c8e87c3414794adac /drivers/gpu/drm
parent0bc519d20ffa7a450bfa21c644c2de95ae8027dc (diff)
downloadlwn-40709aa761acbc78fe6c0405720d79cbf8345095.tar.gz
lwn-40709aa761acbc78fe6c0405720d79cbf8345095.zip
drm/xe: Only set xe_vma_op.map fields for GPUVA map operations
DRM_XE_VM_BIND_OP_MAP_* IOCTL operations can result in GPUVA unmap, remap, or map operations in vm_bind_ioctl_ops_create. The xe_vma_op.map fields are blindly set, which is incorrect for GPUVA unmap or remap operations. Fix this by only setting xe_vma_op.map for GPUVA map operations. Also restructure vm_bind_ioctl_ops_create a bit to make the code more readable. Reported-by: Dafna Hirschfeld <dhirschfeld@habana.ai> Signed-off-by: Matthew Brost <matthew.brost@intel.com> Reviewed-by: Brian Welty <brian.welty@intel.com> Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c59
1 file changed, 18 insertions, 41 deletions
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 174441c4ca5a..1b4d340d0604 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2182,42 +2182,12 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
case DRM_XE_VM_BIND_OP_MAP_USERPTR:
ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
obj, bo_offset_or_userptr);
- if (IS_ERR(ops))
- return ops;
-
- drm_gpuva_for_each_op(__op, ops) {
- struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
- op->tile_mask = tile_mask;
- op->map.immediate =
- flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- }
break;
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
- if (IS_ERR(ops))
- return ops;
-
- drm_gpuva_for_each_op(__op, ops) {
- struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
- op->tile_mask = tile_mask;
- }
break;
case DRM_XE_VM_BIND_OP_PREFETCH:
ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range);
- if (IS_ERR(ops))
- return ops;
-
- drm_gpuva_for_each_op(__op, ops) {
- struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
- op->tile_mask = tile_mask;
- op->prefetch.region = prefetch_region;
- }
break;
case DRM_XE_VM_BIND_OP_UNMAP_ALL:
xe_assert(vm->xe, bo);
@@ -2233,19 +2203,13 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
ops = drm_gpuvm_bo_unmap_ops_create(vm_bo);
drm_gpuvm_bo_put(vm_bo);
xe_bo_unlock(bo);
- if (IS_ERR(ops))
- return ops;
-
- drm_gpuva_for_each_op(__op, ops) {
- struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
-
- op->tile_mask = tile_mask;
- }
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
ops = ERR_PTR(-EINVAL);
}
+ if (IS_ERR(ops))
+ return ops;
#ifdef TEST_VM_ASYNC_OPS_ERROR
if (operation & FORCE_ASYNC_OP_ERROR) {
@@ -2256,9 +2220,22 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
}
#endif
- if (!IS_ERR(ops))
- drm_gpuva_for_each_op(__op, ops)
- print_op(vm->xe, __op);
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ op->tile_mask = tile_mask;
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.immediate =
+ flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+ op->map.read_only =
+ flags & DRM_XE_VM_BIND_FLAG_READONLY;
+ op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ } else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
+ op->prefetch.region = prefetch_region;
+ }
+
+ print_op(vm->xe, __op);
+ }
return ops;
}