Diffstat (limited to 'drivers/gpu/drm/xe/xe_pt.c')
 drivers/gpu/drm/xe/xe_pt.c | 1355 +++++++++++++++++++++++++++++++------------
 1 file changed, 921 insertions(+), 434 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 31a751a5de3f..d6353e8969f0 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -3,18 +3,23 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
+#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
+#include "xe_sched_job.h"
+#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -325,6 +330,7 @@ xe_pt_new_shared(struct xe_walk_update *wupd, struct xe_pt *parent,
entry->pt = parent;
entry->flags = 0;
entry->qwords = 0;
+ entry->pt_bo->update_index = -1;
if (alloc_entries) {
entry->pt_entries = kmalloc_array(XE_PDES,
@@ -842,19 +848,27 @@ xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *t
}
}
-static void xe_pt_abort_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
+static void xe_pt_cancel_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
u32 i, j;
for (i = 0; i < num_entries; i++) {
- if (!entries[i].pt_entries)
+ struct xe_pt *pt = entries[i].pt;
+
+ if (!pt)
continue;
- for (j = 0; j < entries[i].qwords; j++)
- xe_pt_destroy(entries[i].pt_entries[j].pt, xe_vma_vm(vma)->flags, NULL);
+ if (pt->level) {
+ for (j = 0; j < entries[i].qwords; j++)
+ xe_pt_destroy(entries[i].pt_entries[j].pt,
+ xe_vma_vm(vma)->flags, NULL);
+ }
+
kfree(entries[i].pt_entries);
+ entries[i].pt_entries = NULL;
+ entries[i].qwords = 0;
}
}
@@ -864,18 +878,15 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
lockdep_assert_held(&vm->lock);
- if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
- else if (!xe_vma_is_null(vma))
+ if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
-static void xe_pt_commit_bind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries, bool rebind,
- struct llist_head *deferred)
+static void xe_pt_commit(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, struct llist_head *deferred)
{
u32 i, j;
@@ -883,31 +894,90 @@ static void xe_pt_commit_bind(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+
+ if (!pt->level)
+ continue;
+
+ for (j = 0; j < entries[i].qwords; j++) {
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ }
+ }
+}
+
+static void xe_pt_abort_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_pt *pt = entries[i].pt;
struct xe_pt_dir *pt_dir;
if (!rebind)
- pt->num_live += entries[i].qwords;
+ pt->num_live -= entries[i].qwords;
- if (!pt->level) {
- kfree(entries[i].pt_entries);
+ if (!pt->level)
continue;
+
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = 0; j < entries[i].qwords; j++) {
+ u32 j_ = j + entries[i].ofs;
+ struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+
+ pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
+ }
+}
+
+static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries, bool rebind)
+{
+ u32 i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = 0; i < num_entries; i++) {
+ struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
+
+ if (!rebind)
+ pt->num_live += entries[i].qwords;
+
+ if (!pt->level)
+ continue;
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
+ struct xe_pt *oldpte = NULL;
if (xe_pt_entry(pt_dir, j_))
- xe_pt_destroy(xe_pt_entry(pt_dir, j_),
- xe_vma_vm(vma)->flags, deferred);
+ oldpte = xe_pt_entry(pt_dir, j_);
pt_dir->children[j_] = &newpte->base;
+ entries[i].pt_entries[j].pt = oldpte;
}
- kfree(entries[i].pt_entries);
}
}
+static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ u32 i;
+
+ for (i = 0; i < num_entries; i++)
+ kfree(entries[i].pt_entries);
+}
+
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
struct xe_vm_pgtable_update *entries, u32 *num_entries)
@@ -918,20 +988,19 @@ xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
err = xe_pt_stage_bind(tile, vma, entries, num_entries);
if (!err)
xe_tile_assert(tile, *num_entries);
- else /* abort! */
- xe_pt_abort_bind(vma, entries, *num_entries);
return err;
}
static void xe_vm_dbg_print_entries(struct xe_device *xe,
const struct xe_vm_pgtable_update *entries,
- unsigned int num_entries)
+ unsigned int num_entries, bool bind)
#if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
{
unsigned int i;
- vm_dbg(&xe->drm, "%u entries to update\n", num_entries);
+ vm_dbg(&xe->drm, "%s: %u entries to update\n", bind ? "bind" : "unbind",
+ num_entries);
for (i = 0; i < num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *xe_pt = entry->pt;
@@ -952,66 +1021,108 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
{}
#endif
-#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
-
-static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+static bool no_in_syncs(struct xe_sync_entry *syncs, u32 num_syncs)
{
- u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
- static u32 count;
+ int i;
- if (count++ % divisor == divisor - 1) {
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ for (i = 0; i < num_syncs; i++) {
+ struct dma_fence *fence = syncs[i].fence;
- uvma->userptr.divisor = divisor << 1;
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- return true;
+ if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags))
+ return false;
}
- return false;
+ return true;
}
-#else
+static int job_test_add_deps(struct xe_sched_job *job,
+ struct dma_resv *resv,
+ enum dma_resv_usage usage)
+{
+ if (!job) {
+ if (!dma_resv_test_signaled(resv, usage))
+ return -ETIME;
-static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+ return 0;
+ }
+
+ return xe_sched_job_add_deps(job, resv, usage);
+}
+
+static int vma_add_deps(struct xe_vma *vma, struct xe_sched_job *job)
{
- return false;
+ struct xe_bo *bo = xe_vma_bo(vma);
+
+ xe_bo_assert_held(bo);
+
+ if (bo && !bo->vm)
+ return job_test_add_deps(job, bo->ttm.base.resv,
+ DMA_RESV_USAGE_KERNEL);
+
+ return 0;
}
-#endif
+static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_sched_job *job)
+{
+ int err = 0;
-/**
- * struct xe_pt_migrate_pt_update - Callback argument for pre-commit callbacks
- * @base: Base we derive from.
- * @bind: Whether this is a bind or an unbind operation. A bind operation
- * makes the pre-commit callback error with -EAGAIN if it detects a
- * pending invalidation.
- * @locked: Whether the pre-commit callback locked the userptr notifier lock
- * and it needs unlocking.
- */
-struct xe_pt_migrate_pt_update {
- struct xe_migrate_pt_update base;
- bool bind;
- bool locked;
-};
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_add_deps(op->map.vma, job);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_add_deps(op->remap.prev, job);
+ if (!err && op->remap.next)
+ err = vma_add_deps(op->remap.next, job);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
-/*
- * This function adds the needed dependencies to a page-table update job
- * to make sure racing jobs for separate bind engines don't race writing
- * to the same page-table range, wreaking havoc. Initially use a single
- * fence for the entire VM. An optimization would use smaller granularity.
- */
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
- struct xe_range_fence_tree *rftree,
- u64 start, u64 last)
+ struct xe_vm *vm,
+ struct xe_vma_ops *vops,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_range_fence_tree *rftree)
{
struct xe_range_fence *rtfence;
struct dma_fence *fence;
- int err;
+ struct xe_vma_op *op;
+ int err = 0, i;
+
+ xe_vm_assert_held(vm);
+
+ if (!job && !no_in_syncs(vops->syncs, vops->num_syncs))
+ return -ETIME;
- rtfence = xe_range_fence_tree_first(rftree, start, last);
+ if (!job && !xe_exec_queue_is_idle(pt_update_ops->q))
+ return -ETIME;
+
+ if (pt_update_ops->wait_vm_bookkeep || pt_update_ops->wait_vm_kernel) {
+ err = job_test_add_deps(job, xe_vm_resv(vm),
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_BOOKKEEP :
+ DMA_RESV_USAGE_KERNEL);
+ if (err)
+ return err;
+ }
+
+ rtfence = xe_range_fence_tree_first(rftree, pt_update_ops->start,
+ pt_update_ops->last);
while (rtfence) {
fence = rtfence->fence;
@@ -1029,80 +1140,175 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
- rtfence = xe_range_fence_tree_next(rtfence, start, last);
+ rtfence = xe_range_fence_tree_next(rtfence,
+ pt_update_ops->start,
+ pt_update_ops->last);
}
- return 0;
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_add_deps(vm, op, job);
+ if (err)
+ return err;
+ }
+
+ if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
+ if (job)
+ err = xe_sched_job_last_fence_add_dep(job, vm);
+ else
+ err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
+ }
+
+ for (i = 0; job && !err && i < vops->num_syncs; i++)
+ err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+
+ return err;
}
static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- struct xe_range_fence_tree *rftree =
- &xe_vma_vm(pt_update->vma)->rftree[pt_update->tile_id];
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm *vm = vops->vm;
+ struct xe_range_fence_tree *rftree = &vm->rftree[pt_update->tile_id];
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+
+ return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ pt_update_ops, rftree);
+}
+
+#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
- return xe_pt_vm_dependencies(pt_update->job, rftree,
- pt_update->start, pt_update->last);
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
+{
+ u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
+ static u32 count;
+
+ if (count++ % divisor == divisor - 1) {
+ uvma->userptr.divisor = divisor << 1;
+ return true;
+ }
+
+ return false;
}
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+#else
+
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
{
- struct xe_pt_migrate_pt_update *userptr_update =
- container_of(pt_update, typeof(*userptr_update), base);
- struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
- unsigned long notifier_seq = uvma->userptr.notifier_seq;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
- int err = xe_pt_vm_dependencies(pt_update->job,
- &vm->rftree[pt_update->tile_id],
- pt_update->start,
- pt_update->last);
+ return false;
+}
- if (err)
- return err;
+#endif
- userptr_update->locked = false;
+static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ struct xe_userptr_vma *uvma;
+ unsigned long notifier_seq;
- /*
- * Wait until nobody is running the invalidation notifier, and
- * since we're exiting the loop holding the notifier lock,
- * nobody can proceed invalidating either.
- *
- * Note that we don't update the vma->userptr.notifier_seq since
- * we don't update the userptr pages.
- */
- do {
- down_read(&vm->userptr.notifier_lock);
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq))
- break;
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
- up_read(&vm->userptr.notifier_lock);
+ if (!xe_vma_is_userptr(vma))
+ return 0;
- if (userptr_update->bind)
- return -EAGAIN;
+ uvma = to_userptr_vma(vma);
+ notifier_seq = uvma->userptr.notifier_seq;
- notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
- } while (true);
+ if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
+ return 0;
- /* Inject errors to test_whether they are handled correctly */
- if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
- up_read(&vm->userptr.notifier_lock);
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ notifier_seq) &&
+ !xe_pt_userptr_inject_eagain(uvma))
+ return 0;
+
+ if (xe_vm_in_fault_mode(vm)) {
return -EAGAIN;
- }
+ } else {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
- userptr_update->locked = true;
+ if (xe_vm_in_preempt_fence_mode(vm)) {
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ long err;
+
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+ }
+ }
return 0;
}
-static const struct xe_migrate_pt_update_ops bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_pre_commit,
-};
+static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
+{
+ int err = 0;
-static const struct xe_migrate_pt_update_ops userptr_bind_ops = {
- .populate = xe_vm_populate_pgtable,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = vma_check_userptr(vm, op->map.vma, pt_update);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ if (op->remap.prev)
+ err = vma_check_userptr(vm, op->remap.prev, pt_update);
+ if (!err && op->remap.next)
+ err = vma_check_userptr(vm, op->remap.next, pt_update);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
+ pt_update);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+
+ return err;
+}
+
+static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ down_read(&vm->userptr.notifier_lock);
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_userptr(vm, op, pt_update_ops);
+ if (err) {
+ up_read(&vm->userptr.notifier_lock);
+ break;
+ }
+ }
+
+ return err;
+}
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
@@ -1144,10 +1350,10 @@ static void invalidation_fence_work_func(struct work_struct *w)
ifence->end, ifence->asid);
}
-static int invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static void invalidation_fence_init(struct xe_gt *gt,
+ struct invalidation_fence *ifence,
+ struct dma_fence *fence,
+ u64 start, u64 end, u32 asid)
{
int ret;
@@ -1172,192 +1378,6 @@ static int invalidation_fence_init(struct xe_gt *gt,
}
xe_gt_assert(gt, !ret || ret == -ENOENT);
-
- return ret && ret != -ENOENT ? ret : 0;
-}
-
-static void xe_pt_calc_rfence_interval(struct xe_vma *vma,
- struct xe_pt_migrate_pt_update *update,
- struct xe_vm_pgtable_update *entries,
- u32 num_entries)
-{
- int i, level = 0;
-
- for (i = 0; i < num_entries; i++) {
- const struct xe_vm_pgtable_update *entry = &entries[i];
-
- if (entry->pt->level > level)
- level = entry->pt->level;
- }
-
- /* Greedy (non-optimal) calculation but simple */
- update->base.start = ALIGN_DOWN(xe_vma_start(vma),
- 0x1ull << xe_pt_shift(level));
- update->base.last = ALIGN(xe_vma_end(vma),
- 0x1ull << xe_pt_shift(level)) - 1;
-}
-
-/**
- * __xe_pt_bind_vma() - Build and connect a page-table tree for the vma
- * address range.
- * @tile: The tile to bind for.
- * @vma: The vma to bind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before binding the built tree to the live vm tree.
- * @num_syncs: Number of @sync entries.
- * @rebind: Whether we're rebinding this vma to the same address range without
- * an unbind in-between.
- *
- * This function builds a page-table tree (see xe_pt_stage_bind() for more
- * information on page-table building), and the xe_vm_pgtable_update entries
- * abstracting the operations needed to attach it to the main vm tree. It
- * then takes the relevant locks and updates the metadata side of the main
- * vm tree and submits the operations for pipelined attachment of the
- * gpu page-table to the vm main tree, (which can be done either by the
- * cpu and the GPU).
- *
- * Return: A valid dma-fence representing the pipelined attachment operation
- * on success, an error pointer on error.
- */
-struct dma_fence *
-__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs,
- bool rebind)
-{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update bind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_bind_ops : &bind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- .bind = true,
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL;
- struct xe_range_fence *rfence;
- int err;
-
- bind_pt_update.locked = false;
- xe_bo_assert_held(xe_vma_bo(vma));
- xe_vm_assert_held(vm);
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing bind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
-
- err = xe_pt_prepare_bind(tile, vma, entries, &num_entries);
- if (err)
- goto err;
-
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- goto err;
-
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &bind_pt_update, entries,
- num_entries);
-
- /*
- * If rebind, we have to invalidate TLB on !LR vms to invalidate
- * cached PTEs point to freed memory. on LR vms this is done
- * automatically when the context is re-enabled by the rebind worker,
- * or in fault mode it was invalidated on PTE zapping.
- *
- * If !rebind, and scratch enabled VMs, there is a chance the scratch
- * PTE is already cached in the TLB so it needs to be invalidated.
- * on !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
- */
- if ((!rebind && xe_vm_has_scratch(vm) && xe_vm_in_preempt_fence_mode(vm))) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
- } else if (rebind && !xe_vm_in_lr_mode(vm)) {
- /* We bump also if batch_invalidate_tlb is true */
- vm->tlb_flush_seqno++;
- }
-
- rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
- if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
- }
-
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, xe_vma_bo(vma), q,
- entries, num_entries,
- syncs, num_syncs,
- &bind_pt_update.base);
- if (!IS_ERR(fence)) {
- bool last_munmap_rebind = vma->gpuva.flags & XE_VMA_LAST_REBIND;
- LLIST_HEAD(deferred);
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- bind_pt_update.base.start,
- bind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
-
- /* TLB invalidation must be done before signaling rebind */
- if (ifence) {
- int err = invalidation_fence_init(tile->primary_gt,
- ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
- }
- fence = &ifence->base.base;
- }
-
- /* add shared fence now for pagetable delayed destroy */
- dma_resv_add_fence(xe_vm_resv(vm), fence, rebind ||
- last_munmap_rebind ?
- DMA_RESV_USAGE_KERNEL :
- DMA_RESV_USAGE_BOOKKEEP);
-
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_bind(vma, entries, num_entries, rebind,
- bind_pt_update.locked ? &deferred : NULL);
-
- /* This vma is live (again?) now */
- vma->tile_present |= BIT(tile->id);
-
- if (bind_pt_update.locked) {
- to_userptr_vma(vma)->userptr.initial_bind = true;
- up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
- if (!rebind && last_munmap_rebind &&
- xe_vm_in_preempt_fence_mode(vm))
- xe_vm_queue_rebind_worker(vm);
- } else {
- kfree(rfence);
- kfree(ifence);
- if (bind_pt_update.locked)
- up_read(&vm->userptr.notifier_lock);
- xe_pt_abort_bind(vma, entries, num_entries);
- }
-
- return fence;
-
-err:
- return ERR_PTR(err);
}
struct xe_pt_stage_unbind_walk {
@@ -1442,6 +1462,7 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
struct xe_pt *xe_child = container_of(*child, typeof(*xe_child), base);
pgoff_t end_offset;
u64 size = 1ull << walk->shifts[--level];
+ int err;
if (!IS_ALIGNED(addr, size))
addr = xe_walk->modified_start;
@@ -1457,7 +1478,10 @@ xe_pt_stage_unbind_post_descend(struct xe_ptw *parent, pgoff_t offset,
&end_offset))
return 0;
- (void)xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, false);
+ err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true);
+ if (err)
+ return err;
+
xe_walk->wupd.updates[level].update->qwords = end_offset - offset;
return 0;
@@ -1510,8 +1534,8 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
void *ptr, u32 qword_ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update)
{
- struct xe_vma *vma = pt_update->vma;
- u64 empty = __xe_pt_empty_pte(tile, xe_vma_vm(vma), update->pt->level);
+ struct xe_vm *vm = pt_update->vops->vm;
+ u64 empty = __xe_pt_empty_pte(tile, vm, update->pt->level);
int i;
if (map && map->is_iomem)
@@ -1525,181 +1549,644 @@ xe_migrate_clear_pgtable_callback(struct xe_migrate_pt_update *pt_update,
memset64(ptr, empty, num_qwords);
}
+static void xe_pt_abort_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
+{
+ int i, j;
+
+ xe_pt_commit_locks_assert(vma);
+
+ for (i = num_entries - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
+ struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
+
+ pt->num_live += entry->qwords;
+
+ if (!pt->level)
+ continue;
+
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
+ pt_dir->children[j] =
+ entries[i].pt_entries[j - entry->ofs].pt ?
+ &entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
+ }
+}
+
static void
-xe_pt_commit_unbind(struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 num_entries,
- struct llist_head *deferred)
+xe_pt_commit_prepare_unbind(struct xe_vma *vma,
+ struct xe_vm_pgtable_update *entries,
+ u32 num_entries)
{
- u32 j;
+ int i, j;
xe_pt_commit_locks_assert(vma);
- for (j = 0; j < num_entries; ++j) {
- struct xe_vm_pgtable_update *entry = &entries[j];
+ for (i = 0; i < num_entries; ++i) {
+ struct xe_vm_pgtable_update *entry = &entries[i];
struct xe_pt *pt = entry->pt;
+ struct xe_pt_dir *pt_dir;
pt->num_live -= entry->qwords;
- if (pt->level) {
- struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
- u32 i;
+ if (!pt->level)
+ continue;
- for (i = entry->ofs; i < entry->ofs + entry->qwords;
- i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i),
- xe_vma_vm(vma)->flags, deferred);
+ pt_dir = as_xe_pt_dir(pt);
+ for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
+ entry->pt_entries[j - entry->ofs].pt =
+ xe_pt_entry(pt_dir, j);
+ pt_dir->children[j] = NULL;
+ }
+ }
+}
- pt_dir->children[i] = NULL;
- }
+static void
+xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int i, level = 0;
+ u64 start, last;
+
+ for (i = 0; i < pt_op->num_entries; i++) {
+ const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
+
+ if (entry->pt->level > level)
+ level = entry->pt->level;
+ }
+
+ /* Greedy (non-optimal) calculation but simple */
+ start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
+ last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+
+ if (start < pt_update_ops->start)
+ pt_update_ops->start = start;
+ if (last > pt_update_ops->last)
+ pt_update_ops->last = last;
+}
+
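[Editorial aside, not part of the patch: the "greedy" range-fence interval above just rounds the VMA span out to the granularity of the deepest page-table level touched. A minimal, self-contained sketch of that arithmetic, with ALIGN/ALIGN_DOWN redefined locally and a stand-in for xe_pt_shift() that assumes 4 KiB pages and 9 address bits per level:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Stand-in for xe_pt_shift(): assumes 4 KiB pages, 9 bits per level. */
static unsigned int pt_shift(unsigned int level)
{
	return 12 + 9 * level;
}

int main(void)
{
	uint64_t vma_start = 0x101000, vma_end = 0x253000;	/* made-up VMA */
	unsigned int level = 1;		/* deepest PT level touched by the bind */
	uint64_t size = 1ull << pt_shift(level);

	/* Same greedy rounding as xe_pt_update_ops_rfence_interval(). */
	uint64_t start = ALIGN_DOWN(vma_start, size);
	uint64_t last = ALIGN(vma_end, size) - 1;

	printf("range fence covers [0x%llx, 0x%llx]\n",
	       (unsigned long long)start, (unsigned long long)last);
	return 0;
}
]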
+static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
+{
+ int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
+
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
+ xe->info.tile_count << shift);
+
+ return 0;
+}
+
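[Editorial aside: the reservation count used here and in xe_pt_update_ops_prepare() works out to one fence slot per tile, doubled when a media GT exists, since a single bind may then install both a primary-GT and a media-GT invalidation fence. A trivial illustration of the arithmetic (device values are made up):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int tile_count = 2;	/* hypothetical two-tile device */
	bool has_media_gt = true;	/* root tile has a media GT */
	int shift = has_media_gt ? 1 : 0;

	printf("fence slots to reserve: %u\n", tile_count << shift);	/* 4 */
	return 0;
}
]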
+static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & vma->tile_present;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
+ &pt_op->num_entries);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+
+ /*
+ * If rebind, we have to invalidate TLB on !LR vms to invalidate
+ * cached PTEs pointing to freed memory. On LR vms this is done
+ * automatically when the context is re-enabled by the rebind worker,
+ * or in fault mode it was invalidated on PTE zapping.
+ *
+ * If !rebind, and scratch enabled VMs, there is a chance the scratch
+ * PTE is already cached in the TLB so it needs to be invalidated.
+ * On !LR VMs this is done in the ring ops preceding a batch, but on
+ * non-faulting LR, in particular on user-space batch buffer chaining,
+ * it needs to be done here.
+ */
+ if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
+ xe_vm_in_preempt_fence_mode(vm)))
+ pt_update_ops->needs_invalidation = true;
+ else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
+ /* We bump also if batch_invalidate_tlb is true */
+ vm->tlb_flush_seqno++;
+
+ vma->tile_staged |= BIT(tile->id);
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
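[Editorial aside: to make the TLB-invalidation decision in the comment above easier to follow, here is a small, self-contained sketch (not driver code, names illustrative only) enumerating the cases: a non-rebind on a scratch-enabled, preempt-fence-mode VM needs an explicit invalidation fence, while a rebind on a non-LR VM only bumps the TLB flush seqno so the ring ops invalidate before the next batch.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the if/else decision in bind_op_prepare(). */
static const char *tlb_action(bool rebind, bool has_scratch,
			      bool preempt_fence_mode, bool lr_mode)
{
	if (!rebind && has_scratch && preempt_fence_mode)
		return "needs_invalidation (scratch PTE may be cached in TLB)";
	if (rebind && !lr_mode)
		return "bump tlb_flush_seqno (ring ops invalidate before batch)";
	return "no explicit action here";
}

int main(void)
{
	printf("%s\n", tlb_action(false, true, true, true));
	printf("%s\n", tlb_action(true, false, false, false));
	printf("%s\n", tlb_action(false, false, false, false));
	return 0;
}
]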
+static int unbind_op_prepare(struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
+ return 0;
+
+ xe_bo_assert_held(xe_vma_bo(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing unbind, with range [%llx...%llx)\n",
+ xe_vma_start(vma), xe_vma_end(vma) - 1);
+
+ /*
+ * Wait for invalidation to complete. An invalidation running while an
+ * unbind is being prepared can corrupt internal page-table state.
+ */
+ if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
+ mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
+
+ pt_op->vma = vma;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ err = vma_reserve_fences(tile_to_xe(tile), vma);
+ if (err)
+ return err;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
+
+ return 0;
+}
+
+static int op_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op)
+{
+ int err = 0;
+
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va));
+
+ if (!err && op->remap.prev) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.prev);
+ pt_update_ops->wait_vm_bookkeep = true;
}
+ if (!err && op->remap.next) {
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ op->remap.next);
+ pt_update_ops->wait_vm_bookkeep = true;
+ }
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ err = unbind_op_prepare(tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va));
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ err = bind_op_prepare(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va));
+ pt_update_ops->wait_vm_kernel = true;
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
+
+ return err;
}
-static const struct xe_migrate_pt_update_ops unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static void
+xe_pt_update_ops_init(struct xe_vm_pgtable_update_ops *pt_update_ops)
+{
+ init_llist_head(&pt_update_ops->deferred);
+ pt_update_ops->start = ~0x0ull;
+ pt_update_ops->last = 0x0ull;
+}
+
+/**
+ * xe_pt_update_ops_prepare() - Prepare PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Prepare PT update operations, which includes updating internal PT state,
+ * allocating memory for page tables, populating the page tables being pruned
+ * in, and creating PT update operations for leaf insertion / removal.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct xe_vma_op *op;
+ int shift = tile->media_gt ? 1 : 0;
+ int err;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ xe_pt_update_ops_init(pt_update_ops);
+
+ err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
+ tile_to_xe(tile)->info.tile_count << shift);
+ if (err)
+ return err;
+
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_prepare(vops->vm, tile, pt_update_ops, op);
+
+ if (err)
+ return err;
+ }
+
+ xe_tile_assert(tile, pt_update_ops->current_op <=
+ pt_update_ops->num_ops);
+
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
+ return -ENOSPC;
+#endif
+
+ return 0;
+}
+
+static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present |= BIT(tile->id);
+ vma->tile_staged &= ~BIT(tile->id);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ to_userptr_vma(vma)->userptr.initial_bind = true;
+ }
+
+ /*
+ * Kick the rebind worker if this bind triggers preempt fences and we are
+ * not already running in the rebind worker.
+ */
+ if (pt_update_ops->wait_vm_bookkeep &&
+ xe_vm_in_preempt_fence_mode(vm) &&
+ !current->mm)
+ xe_vm_queue_rebind_worker(vm);
+}
+
+static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ vma->tile_present &= ~BIT(tile->id);
+ if (!vma->tile_present) {
+ list_del_init(&vma->combined_links.rebind);
+ if (xe_vma_is_userptr(vma)) {
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+ }
+}
+
+static void op_commit(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma_op *op, struct dma_fence *fence,
+ struct dma_fence *fence2)
+{
+ xe_vm_assert_held(vm);
+
+ switch (op->base.op) {
+ case DRM_GPUVA_OP_MAP:
+ if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ break;
+
+ bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+ fence2);
+ break;
+ case DRM_GPUVA_OP_REMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.remap.unmap->va), fence,
+ fence2);
+
+ if (op->remap.prev)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
+ fence, fence2);
+ if (op->remap.next)
+ bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
+ fence, fence2);
+ break;
+ case DRM_GPUVA_OP_UNMAP:
+ unbind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ break;
+ case DRM_GPUVA_OP_PREFETCH:
+ bind_op_commit(vm, tile, pt_update_ops,
+ gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ break;
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
+ }
+}
+
+static const struct xe_migrate_pt_update_ops migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_unbind_ops = {
- .populate = xe_migrate_clear_pgtable_callback,
+static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
.pre_commit = xe_pt_userptr_pre_commit,
};
/**
- * __xe_pt_unbind_vma() - Disconnect and free a page-table tree for the vma
- * address range.
- * @tile: The tile to unbind for.
- * @vma: The vma to unbind.
- * @q: The exec_queue with which to do pipelined page-table updates.
- * @syncs: Entries to sync on before disconnecting the tree to be destroyed.
- * @num_syncs: Number of @sync entries.
+ * xe_pt_update_ops_run() - Run PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
*
- * This function builds a the xe_vm_pgtable_update entries abstracting the
- * operations needed to detach the page-table tree to be destroyed from the
- * man vm tree.
- * It then takes the relevant locks and submits the operations for
- * pipelined detachment of the gpu page-table from the vm main tree,
- * (which can be done either by the cpu and the GPU), Finally it frees the
- * detached page-table tree.
+ * Run PT update operations, which includes committing internal PT state
+ * changes, creating a job for the PT update operations for leaf insertion /
+ * removal, and installing the job fence in the VM's and external BOs' dma-resv objects.
*
- * Return: A valid dma-fence representing the pipelined detachment operation
- * on success, an error pointer on error.
+ * Return: fence on success, negative ERR_PTR on error.
*/
struct dma_fence *
-__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
- struct xe_sync_entry *syncs, u32 num_syncs)
+xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
{
- struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
- struct xe_pt_migrate_pt_update unbind_pt_update = {
- .base = {
- .ops = xe_vma_is_userptr(vma) ? &userptr_unbind_ops :
- &unbind_ops,
- .vma = vma,
- .tile_id = tile->id,
- },
- };
- struct xe_vm *vm = xe_vma_vm(vma);
- u32 num_entries;
- struct dma_fence *fence = NULL;
- struct invalidation_fence *ifence;
+ struct xe_vm *vm = vops->vm;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ struct dma_fence *fence;
+ struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
- int err;
-
- LLIST_HEAD(deferred);
+ struct xe_vma_op *op;
+ int err = 0, i;
+ struct xe_migrate_pt_update update = {
+ .ops = pt_update_ops->needs_userptr_lock ?
+ &userptr_migrate_ops :
+ &migrate_ops,
+ .vops = vops,
+ .tile_id = tile->id,
+ };
- xe_bo_assert_held(xe_vma_bo(vma));
+ lockdep_assert_held(&vm->lock);
xe_vm_assert_held(vm);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "Preparing unbind, with range [%llx...%llx) engine %p.\n",
- xe_vma_start(vma), xe_vma_end(vma), q);
+ if (!pt_update_ops->current_op) {
+ xe_tile_assert(tile, xe_vm_in_fault_mode(vm));
- num_entries = xe_pt_stage_unbind(tile, vma, entries);
- xe_tile_assert(tile, num_entries <= ARRAY_SIZE(entries));
-
- xe_vm_dbg_print_entries(tile_to_xe(tile), entries, num_entries);
- xe_pt_calc_rfence_interval(vma, &unbind_pt_update, entries,
- num_entries);
+ return dma_fence_get_stub();
+ }
- err = dma_resv_reserve_fences(xe_vm_resv(vm), 1);
- if (!err && !xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- err = dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv, 1);
- if (err)
- return ERR_PTR(err);
+#ifdef TEST_VM_OPS_ERROR
+ if (vops->inject_error &&
+ vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
+ return ERR_PTR(-ENOSPC);
+#endif
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence)
- return ERR_PTR(-ENOMEM);
+ if (pt_update_ops->needs_invalidation) {
+ ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto kill_vm_tile1;
+ }
+ if (tile->media_gt) {
+ mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!mfence) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ cf = dma_fence_array_alloc(2);
+ if (!cf) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ }
+ }
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
- kfree(ifence);
- return ERR_PTR(-ENOMEM);
+ err = -ENOMEM;
+ goto free_ifence;
}
- /*
- * Even if we were already evicted and unbind to destroy, we need to
- * clear again here. The eviction may have updated pagetables at a
- * lower level, because it needs to be more conservative.
- */
- fence = xe_migrate_update_pgtables(tile->migrate,
- vm, NULL, q ? q :
- vm->q[tile->id],
- entries, num_entries,
- syncs, num_syncs,
- &unbind_pt_update.base);
- if (!IS_ERR(fence)) {
- int err;
-
- err = xe_range_fence_insert(&vm->rftree[tile->id], rfence,
- &xe_range_fence_kfree_ops,
- unbind_pt_update.base.start,
- unbind_pt_update.base.last, fence);
- if (err)
- dma_fence_wait(fence, false);
+ fence = xe_migrate_update_pgtables(tile->migrate, &update);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_rfence;
+ }
- /* TLB invalidation must be done before signaling unbind */
- err = invalidation_fence_init(tile->primary_gt, ifence, fence,
- xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
- if (err) {
- dma_fence_put(fence);
- kfree(ifence);
- return ERR_PTR(err);
+ /* Point of no return - VM killed if failure after this */
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_commit(pt_op->vma, pt_op->entries,
+ pt_op->num_entries, &pt_update_ops->deferred);
+ pt_op->vma = NULL; /* skip in xe_pt_update_ops_abort */
+ }
+
+ if (xe_range_fence_insert(&vm->rftree[tile->id], rfence,
+ &xe_range_fence_kfree_ops,
+ pt_update_ops->start,
+ pt_update_ops->last, fence))
+ dma_fence_wait(fence, false);
+
+ /* tlb invalidation must be done before signaling rebind */
+ if (ifence) {
+ if (mfence)
+ dma_fence_get(fence);
+ invalidation_fence_init(tile->primary_gt, ifence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ if (mfence) {
+ invalidation_fence_init(tile->media_gt, mfence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ fences[0] = &ifence->base.base;
+ fences[1] = &mfence->base.base;
+ dma_fence_array_init(cf, 2, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ fence = &cf->base;
+ } else {
+ fence = &ifence->base.base;
}
- fence = &ifence->base.base;
+ }
- /* add shared fence now for pagetable delayed destroy */
+ if (!mfence) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- /* This fence will be installed by caller when doing eviction */
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
- dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
- DMA_RESV_USAGE_BOOKKEEP);
- xe_pt_commit_unbind(vma, entries, num_entries,
- unbind_pt_update.locked ? &deferred : NULL);
- vma->tile_present &= ~BIT(tile->id);
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
} else {
- kfree(rfence);
- kfree(ifence);
- }
+ dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (!vma->tile_present)
- list_del_init(&vma->combined_links.rebind);
+ dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- if (unbind_pt_update.locked) {
- xe_tile_assert(tile, xe_vma_is_userptr(vma));
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op,
+ &ifence->base.base, &mfence->base.base);
+ }
- if (!vma->tile_present) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
+ if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
- xe_bo_put_commit(&deferred);
- }
return fence;
+
+free_rfence:
+ kfree(rfence);
+free_ifence:
+ kfree(cf);
+ kfree(fences);
+ kfree(mfence);
+ kfree(ifence);
+kill_vm_tile1:
+ if (err != -EAGAIN && tile->id)
+ xe_vm_kill(vops->vm, false);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_pt_update_ops_fini() - Finish PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Finish PT update operations by committing the deferred free of page-table memory
+ */
+void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = 0; i < pt_update_ops->current_op; ++i) {
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
+
+ xe_pt_free_bind(pt_op->entries, pt_op->num_entries);
+ }
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
+}
+
+/**
+ * xe_pt_update_ops_abort() - Abort PT update operations
+ * @tile: Tile of PT update operations
+ * @vops: VMA operations
+ *
+ * Abort PT update operations by unwinding internal PT state
+ */
+void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops)
+{
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[tile->id];
+ int i;
+
+ lockdep_assert_held(&vops->vm->lock);
+ xe_vm_assert_held(vops->vm);
+
+ for (i = pt_update_ops->num_ops - 1; i >= 0; --i) {
+ struct xe_vm_pgtable_update_op *pt_op =
+ &pt_update_ops->ops[i];
+
+ if (!pt_op->vma || i >= pt_update_ops->current_op)
+ continue;
+
+ if (pt_op->bind)
+ xe_pt_abort_bind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries,
+ pt_op->rebind);
+ else
+ xe_pt_abort_unbind(pt_op->vma, pt_op->entries,
+ pt_op->num_entries);
+ }
+
+ xe_bo_put_commit(&vops->pt_update_ops[tile->id].deferred);
}
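[Editorial aside: a minimal single-tile sketch of the intended calling order for the four entry points above, inferred from their kernel-doc; the real caller lives in xe_vm.c, iterates all tiles, and aggregates the returned fences. Assumes vm->lock and the VM dma-resv are held, per the lockdep asserts in the functions.

/*
 * Hypothetical caller, for illustration only: prepare, then run, then
 * either fini on success or abort to unwind prepared state on error.
 */
static struct dma_fence *example_bind_one_tile(struct xe_tile *tile,
					       struct xe_vma_ops *vops)
{
	struct dma_fence *fence;
	int err;

	err = xe_pt_update_ops_prepare(tile, vops);
	if (err)
		goto abort;

	fence = xe_pt_update_ops_run(tile, vops);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
		goto abort;
	}

	/* Success: free staging memory and put deferred page-table BOs. */
	xe_pt_update_ops_fini(tile, vops);
	return fence;

abort:
	/* Unwind any internal PT state committed during prepare. */
	xe_pt_update_ops_abort(tile, vops);
	return ERR_PTR(err);
}
]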