author     Alex Deucher <alexander.deucher@amd.com>   2015-08-05 12:41:48 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2015-08-17 16:50:18 -0400
commit     5ceb54c68a28fc8af5cf8d32c4fde29c97dd3c18 (patch)
tree       2202b44528b6471bdc11fcf580b5348ec7354f48 /drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
parent     c6a4079badc2f0eda69a028622c7080a284ae147 (diff)
drm/amdgpu: add fence suspend/resume functions
Added to:
- handle draining the ring on suspend
- properly enable/disable interrupts on suspend and resume

Fix breakages from:

commit 467ee3be53d240d08beed2e82a941e820c1ac323
Author: Chunming Zhou <david1.zhou@amd.com>
Date:   Mon Jun 1 14:14:32 2015 +0800

    drm/amdgpu: always enable EOP interrupt v2

Tested-by: Audrey Grodzovsky <audrey.grodzovsky@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
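For context, the two new helpers are meant to be called from the device-level suspend/resume paths. The sketch below is illustrative only: the example_* functions are assumptions made for this note, not part of the patch, and simply show the intended ordering (drain fences and drop the fence interrupt references before the hardware goes down, take them back once the rings are running again).

/*
 * Illustrative sketch only (not part of this patch): how a device-level
 * suspend/resume path might use the new fence helpers.  The example_*
 * wrappers are hypothetical.
 */
#include "amdgpu.h"

static int example_device_suspend(struct amdgpu_device *adev)
{
	/* drain in-flight fences and disable the per-ring fence interrupts */
	amdgpu_fence_driver_suspend(adev);

	/* ... save state and power down IP blocks ... */
	return 0;
}

static int example_device_resume(struct amdgpu_device *adev)
{
	/* ... power up IP blocks and restart the rings ... */

	/* re-enable the fence interrupt on every initialized ring */
	amdgpu_fence_driver_resume(adev);
	return 0;
}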
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  | 61
1 file changed, 61 insertions(+), 0 deletions(-)
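In the diff below, the amdgpu_irq_put() in the suspend path pairs with the amdgpu_irq_get() in the resume path; both operate on a reference count kept per interrupt source and type, so the EOP interrupt is only physically disabled once the last reference is dropped. The fragment below is a simplified stand-in for that refcounting scheme, not the actual amdgpu_irq implementation, included only to make the get/put pairing easier to follow.

/*
 * Simplified stand-in (an assumption, not the real amdgpu_irq code) for the
 * refcounted enable/disable behaviour behind amdgpu_irq_get()/amdgpu_irq_put().
 */
struct example_irq_source {
	int refcount;   /* users that currently want this interrupt */
	bool enabled;   /* mirrored hardware enable state */
};

static void example_irq_get(struct example_irq_source *src)
{
	if (src->refcount++ == 0)
		src->enabled = true;    /* first user: enable in hardware */
}

static void example_irq_put(struct example_irq_source *src)
{
	if (--src->refcount == 0)
		src->enabled = false;   /* last user gone: disable in hardware */
}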
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index b89dafec9ecf..1b0bc07d0c01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -956,6 +956,67 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
}
/**
+ * amdgpu_fence_driver_suspend - suspend the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Suspend the fence driver for all possible rings (all asics).
+ */
+void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ mutex_lock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
+ /* wait for gpu to finish processing current batch */
+ r = amdgpu_fence_wait_empty(ring);
+ if (r) {
+ /* delay GPU reset to resume */
+ amdgpu_fence_driver_force_completion(adev);
+ }
+
+ /* disable the interrupt */
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+ mutex_unlock(&adev->ring_lock);
+}
+
+/**
+ * amdgpu_fence_driver_resume - resume the fence driver
+ * for all possible rings.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Resume the fence driver for all possible rings (all asics).
+ * Not all asics have all rings, so each asic will only
+ * start the fence driver on the rings it has using
+ * amdgpu_fence_driver_start_ring().
+ */
+void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+{
+ int i;
+
+ mutex_lock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+
+ /* enable the interrupt */
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+ mutex_unlock(&adev->ring_lock);
+}
+
+/**
* amdgpu_fence_driver_force_completion - force all fence waiters to complete
*
* @adev: amdgpu device pointer