author | Chunming Zhou <david1.zhou@amd.com> | 2015-08-25 17:23:45 +0800
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-08-26 17:50:42 -0400
commit | c7ae72c01be10f539f385f624713f8ba0aa11a8f (patch)
tree | c523633396afdeede8a47c386436bf0dd4e541ac /drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
parent | 113cd9dacbad4906e1c8db09233b9fdcb002f80d (diff)
drm/amdgpu: use IB for copy buffer of eviction
This aids in handling buffer moves with the scheduler.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
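
For context, a minimal caller-side sketch of what this change means in practice, condensed from the amdgpu_move_blit() hunk in the diff below: amdgpu_copy_buffer() now builds the copy as an IB submitted through the scheduler helper and hands back a generic struct fence instead of a ring-level struct amdgpu_fence. The helper name example_blit_move(), its trimmed argument list, and the explicit error check are illustrative assumptions, not part of the patch (the patched hunk leaves a FIXME at that spot); the usual amdgpu/TTM headers are assumed.

/*
 * Illustrative sketch only, not part of this commit: how a caller such as
 * amdgpu_move_blit() consumes amdgpu_copy_buffer() after this change.
 */
static int example_blit_move(struct ttm_buffer_object *bo,
			     struct amdgpu_ring *ring,
			     uint64_t old_start, uint64_t new_start,
			     struct ttm_mem_reg *new_mem,
			     bool evict, bool no_wait_gpu)
{
	struct fence *fence;	/* generic fence, no longer struct amdgpu_fence */
	int r;

	/* The copy is emitted into an IB and submitted via the scheduler
	 * helper; the returned fence signals when the blit completes. */
	r = amdgpu_copy_buffer(ring, old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE, /* bytes */
			       bo->resv, &fence);
	if (r)
		return r;	/* hypothetical check; the patch keeps a FIXME here */

	/* TTM takes its own reference for the accel cleanup; the caller then
	 * drops its reference with fence_put() instead of amdgpu_fence_unref(). */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return r;
}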
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 77
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4cb81320b045..399143541d8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -228,7 +228,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev;
 	struct amdgpu_ring *ring;
 	uint64_t old_start, new_start;
-	struct amdgpu_fence *fence;
+	struct fence *fence;
 	int r;
 
 	adev = amdgpu_get_adev(bo->bdev);
@@ -269,9 +269,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 			       new_mem->num_pages * PAGE_SIZE, /* bytes */
 			       bo->resv, &fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+	r = ttm_bo_move_accel_cleanup(bo, fence,
 				      evict, no_wait_gpu, new_mem);
-	amdgpu_fence_unref(&fence);
+	fence_put(fence);
 	return r;
 }
 
@@ -987,52 +987,48 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		       uint64_t dst_offset,
 		       uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct amdgpu_fence **fence)
+		       struct fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_sync sync;
 	uint32_t max_bytes;
 	unsigned num_loops, num_dw;
+	struct amdgpu_ib *ib;
 	unsigned i;
 	int r;
 
-	/* sync other rings */
-	amdgpu_sync_create(&sync);
-	if (resv) {
-		r = amdgpu_sync_resv(adev, &sync, resv, false);
-		if (r) {
-			DRM_ERROR("sync failed (%d).\n", r);
-			amdgpu_sync_free(adev, &sync, NULL);
-			return r;
-		}
-	}
-
 	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
 	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
 	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
 
-	/* for fence and sync */
-	num_dw += 64 + AMDGPU_NUM_SYNCS * 8;
+	/* for IB padding */
+	while (num_dw & 0x7)
+		num_dw++;
+
+	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
+	if (!ib)
+		return -ENOMEM;
 
-	r = amdgpu_sync_wait(&sync);
+	r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
 	if (r) {
-		DRM_ERROR("sync wait failed (%d).\n", r);
-		amdgpu_sync_free(adev, &sync, NULL);
+		kfree(ib);
 		return r;
 	}
 
-	r = amdgpu_ring_lock(ring, num_dw);
-	if (r) {
-		DRM_ERROR("ring lock failed (%d).\n", r);
-		amdgpu_sync_free(adev, &sync, NULL);
-		return r;
+	ib->length_dw = 0;
+
+	if (resv) {
+		r = amdgpu_sync_resv(adev, &ib->sync, resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED);
+		if (r) {
+			DRM_ERROR("sync failed (%d).\n", r);
+			goto error_free;
+		}
 	}
 
-	amdgpu_sync_rings(&sync, ring);
 	for (i = 0; i < num_loops; i++) {
 		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-		amdgpu_emit_copy_buffer(adev, ring, src_offset, dst_offset,
+		amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
 					cur_size_in_bytes);
 
 		src_offset += cur_size_in_bytes;
@@ -1040,17 +1036,24 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 		byte_count -= cur_size_in_bytes;
 	}
 
-	r = amdgpu_fence_emit(ring, AMDGPU_FENCE_OWNER_MOVE, fence);
-	if (r) {
-		amdgpu_ring_unlock_undo(ring);
-		amdgpu_sync_free(adev, &sync, NULL);
-		return r;
-	}
-
-	amdgpu_ring_unlock_commit(ring);
-	amdgpu_sync_free(adev, &sync, &(*fence)->base);
-
+	amdgpu_vm_pad_ib(adev, ib);
+	WARN_ON(ib->length_dw > num_dw);
+	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
+						 &amdgpu_vm_free_job,
+						 AMDGPU_FENCE_OWNER_MOVE,
+						 fence);
+	if (r)
+		goto error_free;
+
+	if (!amdgpu_enable_scheduler) {
+		amdgpu_ib_free(adev, ib);
+		kfree(ib);
+	}
 	return 0;
+error_free:
+	amdgpu_ib_free(adev, ib);
+	kfree(ib);
+	return r;
 }
 
 #if defined(CONFIG_DEBUG_FS)