author		Jani Nikula <jani.nikula@intel.com>	2020-01-09 17:19:12 +0200
committer	Jani Nikula <jani.nikula@intel.com>	2020-01-09 17:19:12 +0200
commit		ec027b33c8bb164430224ce1f5cb733036b20d3f
tree		1f12e08a45ebed8c6fc34948b54f53da621c8976 /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
parent		6251215fe56c39e07de8bcb3a5ecb19a80ef423e
parent		a566696cf9f27f701cd4a6426d46f112c180059c
Merge drm/drm-next into drm-intel-next-queued
Sync with drm-next to get the new logging macros, among other things.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c	103
1 file changed, 92 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 598c24505c73..8f26504a59a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -562,8 +562,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 {
 	entry->priority = 0;
 	entry->tv.bo = &vm->root.base.bo->tbo;
-	/* One for the VM updates, one for TTM and one for the CS job */
-	entry->tv.num_shared = 3;
+	/* One for TTM and one for the CS job */
+	entry->tv.num_shared = 2;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
 }
@@ -656,7 +656,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			      void *param)
 {
 	struct amdgpu_vm_bo_base *bo_base, *tmp;
-	int r = 0;
+	int r;
 
 	vm->bulk_moveable &= list_empty(&vm->evicted);
 
@@ -665,7 +665,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 		r = validate(param, bo);
 		if (r)
-			break;
+			return r;
 
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
@@ -678,7 +678,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		}
 	}
 
-	return r;
+	mutex_lock(&vm->eviction_lock);
+	vm->evicting = false;
+	mutex_unlock(&vm->eviction_lock);
+
+	return 0;
 }
 
 /**
@@ -1555,15 +1559,25 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (!(flags & AMDGPU_PTE_VALID))
 		owner = AMDGPU_FENCE_OWNER_KFD;
 
+	mutex_lock(&vm->eviction_lock);
+	if (vm->evicting) {
+		r = -EBUSY;
+		goto error_unlock;
+	}
+
 	r = vm->update_funcs->prepare(&params, owner, exclusive);
 	if (r)
-		return r;
+		goto error_unlock;
 
 	r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
 	if (r)
-		return r;
+		goto error_unlock;
+
+	r = vm->update_funcs->commit(&params, fence);
 
-	return vm->update_funcs->commit(&params, fence);
+error_unlock:
+	mutex_unlock(&vm->eviction_lock);
+	return r;
 }
 
 /**
@@ -2500,6 +2514,41 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+	struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+	/* Page tables of a destroyed VM can go away immediately */
+	if (!bo_base || !bo_base->vm)
+		return true;
+
+	/* Don't evict VM page tables while they are busy */
+	if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true))
+		return false;
+
+	/* Try to block ongoing updates */
+	if (!mutex_trylock(&bo_base->vm->eviction_lock))
+		return false;
+
+	/* Don't evict VM page tables while they are updated */
+	if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
+	    !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+		mutex_unlock(&bo_base->vm->eviction_lock);
+		return false;
+	}
+
+	bo_base->vm->evicting = true;
+	mutex_unlock(&bo_base->vm->eviction_lock);
+	return true;
+}
+
+/**
  * amdgpu_vm_bo_invalidate - mark the bo as invalid
  *
  * @adev: amdgpu_device pointer
@@ -2661,8 +2710,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
-					 true, true, timeout);
+	timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+					    true, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
+	if (timeout <= 0)
+		return timeout;
+
+	return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
 }
 
 /**
@@ -2708,6 +2765,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		goto error_free_direct;
 
 	vm->pte_support_ats = false;
+	vm->is_compute_context = false;
 
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
@@ -2730,6 +2788,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	else
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	vm->last_update = NULL;
+	vm->last_direct = dma_fence_get_stub();
+	vm->last_delayed = dma_fence_get_stub();
+
+	mutex_init(&vm->eviction_lock);
+	vm->evicting = false;
 
 	amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp);
 	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
@@ -2780,6 +2843,8 @@ error_free_root:
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
+	dma_fence_put(vm->last_direct);
+	dma_fence_put(vm->last_delayed);
 	drm_sched_entity_destroy(&vm->delayed);
 
 error_free_direct:
@@ -2893,6 +2958,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 	dma_fence_put(vm->last_update);
 	vm->last_update = NULL;
+	vm->is_compute_context = true;
 
 	if (vm->pasid) {
 		unsigned long flags;
@@ -2947,6 +3013,7 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 	vm->pasid = 0;
+	vm->is_compute_context = false;
 }
 
 /**
@@ -2978,6 +3045,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		vm->pasid = 0;
 	}
 
+	dma_fence_wait(vm->last_direct, false);
+	dma_fence_put(vm->last_direct);
+	dma_fence_wait(vm->last_delayed, false);
+	dma_fence_put(vm->last_delayed);
+
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
 		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
 			amdgpu_vm_prt_fini(adev, vm);
@@ -3194,11 +3266,20 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
 	flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
 		AMDGPU_PTE_SYSTEM;
 
-	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+	if (vm->is_compute_context) {
+		/* Intentionally setting invalid PTE flag
+		 * combination to force a no-retry-fault
+		 */
+		flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
+			AMDGPU_PTE_TF;
+		value = 0;
+
+	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
 		/* Redirect the access to the dummy page */
 		value = adev->dummy_page_addr;
 		flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
 			 AMDGPU_PTE_WRITEABLE;
+
 	} else {
 		/* Let the hw retry silently on the PTE */
 		value = 0;
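
The heart of the amdgpu_vm.c changes above is a small locking handshake: the eviction path (amdgpu_vm_evictable()) may only claim a VM by succeeding a mutex_trylock() on eviction_lock and setting the evicting flag, while page table updates (amdgpu_vm_bo_update_mapping()) take the same lock and bail out with -EBUSY once the flag is set; amdgpu_vm_validate_pt_bos() clears the flag again after revalidation. Below is a minimal userspace sketch of that pattern, assuming pthreads in place of the kernel's struct mutex; the names vm_ctx, vm_try_begin_evict() and vm_begin_update() are invented for illustration and do not exist in amdgpu.

/*
 * Minimal userspace sketch of the eviction-gate pattern above, using
 * pthreads instead of the kernel's struct mutex.  vm_ctx,
 * vm_try_begin_evict() and vm_begin_update() are hypothetical names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vm_ctx {
	pthread_mutex_t eviction_lock;	/* guards the evicting flag */
	bool evicting;			/* set while an eviction is in flight */
};

/* Eviction side: claim the VM only if no update currently holds the lock. */
static bool vm_try_begin_evict(struct vm_ctx *vm)
{
	if (pthread_mutex_trylock(&vm->eviction_lock) != 0)
		return false;	/* an update is running; don't block on it */
	vm->evicting = true;
	pthread_mutex_unlock(&vm->eviction_lock);
	return true;
}

/* Update side: refuse to touch page tables once an eviction has started. */
static int vm_begin_update(struct vm_ctx *vm)
{
	pthread_mutex_lock(&vm->eviction_lock);
	if (vm->evicting) {
		pthread_mutex_unlock(&vm->eviction_lock);
		return -1;	/* the kernel code returns -EBUSY here */
	}
	/* ... the page table update would run here, under the lock ... */
	pthread_mutex_unlock(&vm->eviction_lock);
	return 0;
}

int main(void)
{
	struct vm_ctx vm = {
		.eviction_lock = PTHREAD_MUTEX_INITIALIZER,
		.evicting = false,
	};

	printf("update before evict: %d\n", vm_begin_update(&vm));	/* 0 */
	printf("evict claimed:       %d\n", vm_try_begin_evict(&vm));	/* 1 */
	printf("update after evict:  %d\n", vm_begin_update(&vm));	/* -1 */
	return 0;
}

The trylock on the eviction side is the key design choice: TTM's eviction path must not block on an in-flight page table update, so a busy VM is simply reported as not evictable rather than waited for, mirroring amdgpu_vm_evictable() returning false.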