summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  118
1 file changed, 46 insertions, 72 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1054d66c54fa..8048a4c04b47 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -89,16 +89,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
return seq;
}
-static void amdgpu_fence_save_fence_wptr_start(struct amdgpu_fence *af)
-{
- af->fence_wptr_start = af->ring->wptr;
-}
-
-static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
-{
- af->fence_wptr_end = af->ring->wptr;
-}
-
/**
* amdgpu_fence_emit - emit a fence on the requested ring
*
@@ -107,16 +97,14 @@ static void amdgpu_fence_save_fence_wptr_end(struct amdgpu_fence *af)
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
- * Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
- unsigned int flags)
+void amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
- int r;
fence = &af->base;
af->ring = ring;
@@ -126,11 +114,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
&ring->fence_drv.lock,
adev->fence_context + ring->idx, seq);
- amdgpu_fence_save_fence_wptr_start(af);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- amdgpu_fence_save_fence_wptr_end(af);
- amdgpu_fence_save_wptr(af);
+
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -141,10 +127,13 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
rcu_read_unlock();
if (old) {
- r = dma_fence_wait(old, false);
+ /*
+ * dma_fence_wait(old, false) is not interruptible.
+ * It will not return an error in this case.
+ * So we can safely ignore the return value.
+ */
+ dma_fence_wait(old, false);
dma_fence_put(old);
- if (r)
- return r;
}
}
@@ -154,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
* emitting the fence would mess up the hardware ring buffer.
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
-
- return 0;
}
/**
@@ -241,7 +228,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
do {
struct dma_fence *fence, **ptr;
- struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -254,12 +240,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- /* Save the wptr in the fence driver so we know what the last processed
- * wptr was. This is required for re-emitting the ring state for
- * queues that are reset but are not guilty and thus have no guilty fence.
- */
- am_fence = container_of(fence, struct amdgpu_fence, base);
- drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -458,7 +438,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* @ring: ring to init the fence driver on
*
* Init the fence driver for the requested ring (all asics).
- * Helper function for amdgpu_fence_driver_init().
+ * Helper function for amdgpu_fence_driver_sw_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
@@ -708,25 +688,29 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
*/
/**
- * amdgpu_fence_driver_update_timedout_fence_state - Update fence state and set errors
+ * amdgpu_ring_set_fence_errors_and_reemit - Set dma_fence errors and reemit
*
- * @af: fence of the ring to update
+ * @ring: the ring to operate on
+ * @guilty_fence: fence of the ring to update
*
*/
-void amdgpu_fence_driver_update_timedout_fence_state(struct amdgpu_fence *af)
+void amdgpu_ring_set_fence_errors_and_reemit(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
{
struct dma_fence *unprocessed;
struct dma_fence __rcu **ptr;
struct amdgpu_fence *fence;
- struct amdgpu_ring *ring = af->ring;
unsigned long flags;
u32 seq, last_seq;
- bool reemitted = false;
+ unsigned int i;
+ bool is_guilty_fence;
+ bool is_guilty_context;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
- /* mark all fences from the guilty context with an error */
+ ring->reemit = true;
+ amdgpu_ring_alloc(ring, ring->ring_backup_entries_to_copy);
spin_lock_irqsave(&ring->fence_drv.lock, flags);
do {
last_seq++;
@@ -738,39 +722,45 @@ void amdgpu_fence_driver_update_timedout_fence_state(struct amdgpu_fence *af)
if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
+ is_guilty_fence = fence == guilty_fence;
+ is_guilty_context = fence->context == guilty_fence->context;
- if (fence->reemitted > 1)
- reemitted = true;
- else if (fence == af)
+ /* mark all fences from the guilty context with an error */
+ if (is_guilty_fence)
dma_fence_set_error(&fence->base, -ETIME);
- else if (fence->context == af->context)
+ else if (is_guilty_context)
dma_fence_set_error(&fence->base, -ECANCELED);
+
+ /* reemit the packet stream and update wptrs */
+ fence->ib_wptr = ring->wptr;
+ for (i = 0; i < fence->ib_dw_size; i++) {
+ /* Skip the IB(s) for the guilty context. */
+ if (is_guilty_context &&
+ i >= fence->skip_ib_dw_start_offset &&
+ i < fence->skip_ib_dw_end_offset)
+ amdgpu_ring_write(ring, ring->funcs->nop);
+ else
+ amdgpu_ring_write(ring,
+ ring->ring_backup[fence->backup_idx + i]);
+ }
}
rcu_read_unlock();
} while (last_seq != seq);
spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
-
- if (reemitted) {
- /* if we've already reemitted once then just cancel everything */
- amdgpu_fence_driver_force_completion(af->ring);
- af->ring->ring_backup_entries_to_copy = 0;
- }
-}
-
-void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
-{
- af->wptr = af->ring->wptr;
+ amdgpu_ring_commit(ring);
+ ring->reemit = false;
}
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
- u64 start_wptr, u64 end_wptr)
+ struct amdgpu_fence *af)
{
- unsigned int first_idx = start_wptr & ring->buf_mask;
- unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int first_idx = af->ib_wptr & ring->buf_mask;
+ unsigned int dw_size = af->ib_dw_size;
unsigned int i;
+ af->backup_idx = ring->ring_backup_entries_to_copy;
/* Backup the contents of the ring buffer. */
- for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ for (i = first_idx; dw_size > 0; ++i, i &= ring->buf_mask, --dw_size)
ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
}
@@ -780,12 +770,10 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
struct dma_fence *unprocessed;
struct dma_fence __rcu **ptr;
struct amdgpu_fence *fence;
- u64 wptr;
u32 seq, last_seq;
last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
- wptr = ring->fence_drv.signalled_wptr;
ring->ring_backup_entries_to_copy = 0;
do {
@@ -799,21 +787,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
fence = container_of(unprocessed, struct amdgpu_fence, base);
- /* save everything if the ring is not guilty, otherwise
- * just save the content from other contexts.
- */
- if (!fence->reemitted &&
- (!guilty_fence || (fence->context != guilty_fence->context))) {
- amdgpu_ring_backup_unprocessed_command(ring, wptr,
- fence->wptr);
- } else if (!fence->reemitted) {
- /* always save the fence */
- amdgpu_ring_backup_unprocessed_command(ring,
- fence->fence_wptr_start,
- fence->fence_wptr_end);
- }
- wptr = fence->wptr;
- fence->reemitted++;
+ amdgpu_ring_backup_unprocessed_command(ring, fence);
}
rcu_read_unlock();
} while (last_seq != seq);