author     Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>   2021-08-03 16:32:31 -0400
committer  Alex Deucher <alexander.deucher@amd.com>            2021-08-16 15:35:56 -0400
commit     58aa1c50e5a231c0912b26bce0d15c44d5384fad (patch)
tree       db5150b54c82526a292b962bdf75d2e10d25315c /drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
parent     09a5df6c444cf092f87da0fa113b412138253f9b (diff)
drm/amd/display: Use vblank control events for PSR enable/disable
[Why]
PSR can disable the HUBP along with the OTG when PSR is active.

We'll hit a pageflip timeout when the OTG is disabled because we're no
longer updating the CRTC vblank counter and the pflip high IRQ will
not fire on the flip.

In order to fix the page flip timeout we should modify the enter/exit
conditions to match DRM requirements.

[How]
Use our deferred handlers for DRM vblank control to notify DMCU(B)
when it can enable or disable PSR based on whether vblank is disabled
or enabled respectively.

We'll need to pass along the stream with the notification now because
we want to access the CRTC state while the CRTC is locked to get the
stream state prior to the commit.

Retain a reference to the stream so it remains safe to access, and
release that reference once we're done with it.

Enable/disable logic follows what we were previously doing in
update_planes.

The workqueue has to be flushed before programming streams or planes
to ensure that we exit idle optimizations and PSR before those events
occur, if necessary.

To keep the skip count logic the same and avoid FBCON PSR enablement,
we copy the allow condition onto the DM IRQ parameters - a field that
we can actually access from the worker.

Reviewed-by: Roman Li <Roman.Li@amd.com>
Acked-by: Wayne Lin <wayne.lin@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
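Illustration (not part of the patch): the flow above hinges on the deferred work item carrying a retained stream reference into the worker. A minimal sketch, assuming the field names of the vblank_control_work structure these handlers use (it is defined outside this file, in amdgpu_dm.h):

/*
 * Sketch of the deferred vblank work item (assumed field names).
 * The stream pointer is the piece this patch adds: it is retained
 * when the work is queued and released by the worker when done.
 */
struct vblank_control_work {
	struct work_struct work;	/* runs vblank_control_worker() */
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;	/* carries dm_irq_params.allow_psr_entry */
	struct dc_stream_state *stream;	/* retained; released in the worker */
	bool enable;			/* vblank enabled -> PSR must be off */
};

/* Queue side, as in dm_set_vblank() below: retain so the stream stays
 * valid until the worker runs, even after the CRTC state moves on. */
if (acrtc_state->stream) {
	dc_stream_retain(acrtc_state->stream);
	work->stream = acrtc_state->stream;
}
queue_work(dm->vblank_control_workqueue, &work->work);

The worker then toggles PSR under dm->dc_lock and drops the reference with dc_stream_release(), as the first hunk below shows.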
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 48
1 file changed, 40 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f88b6c5b83cd..cebd663b6708 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1061,7 +1061,22 @@ static void vblank_control_worker(struct work_struct *work)
 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

+	/* Control PSR based on vblank requirements from OS */
+	if (vblank_work->stream && vblank_work->stream->link) {
+		if (vblank_work->enable) {
+			if (vblank_work->stream->link->psr_settings.psr_allow_active)
+				amdgpu_dm_psr_disable(vblank_work->stream);
+		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
+			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+			amdgpu_dm_psr_enable(vblank_work->stream);
+		}
+	}
+
 	mutex_unlock(&dm->dc_lock);
+
+	dc_stream_release(vblank_work->stream);
+
 	kfree(vblank_work);
 }
@@ -6018,6 +6033,11 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 	work->acrtc = acrtc;
 	work->enable = enable;

+	if (acrtc_state->stream) {
+		dc_stream_retain(acrtc_state->stream);
+		work->stream = acrtc_state->stream;
+	}
+
 	queue_work(dm->vblank_control_workqueue, &work->work);
 #endif
@@ -8623,6 +8643,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	/* Update the planes if changed or disable if we don't have any. */
 	if ((planes_count || acrtc_state->active_planes == 0) &&
 		acrtc_state->stream) {
+		/*
+		 * If PSR or idle optimizations are enabled then flush out
+		 * any pending work before hardware programming.
+		 */
+		flush_workqueue(dm->vblank_control_workqueue);
+
 		bundle->stream_update.stream = acrtc_state->stream;
 		if (new_pcrtc_state->mode_changed) {
 			bundle->stream_update.src = acrtc_state->stream->src;
@@ -8691,16 +8717,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		    acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
 		    !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
-		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-			acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
-			!acrtc_state->stream->link->psr_settings.psr_allow_active) {
-			struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
-				acrtc_state->stream->dm_stream_context;
+
+		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
+		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+			struct amdgpu_dm_connector *aconn =
+				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

 			if (aconn->psr_skip_count > 0)
 				aconn->psr_skip_count--;
-			else
-				amdgpu_dm_psr_enable(acrtc_state->stream);
+
+			/* Allow PSR when skip count is 0. */
+			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+		} else {
+			acrtc_attach->dm_irq_params.allow_psr_entry = false;
 		}

 		mutex_unlock(&dm->dc_lock);
@@ -8949,8 +8979,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	if (dc_state) {
 		/* if there is a mode set or reset, disable eDP PSR */
-		if (mode_set_reset_required)
+		if (mode_set_reset_required) {
+			flush_workqueue(dm->vblank_control_workqueue);
 			amdgpu_dm_psr_disable_all(dm);
+		}

 		dm_enable_per_frame_crtc_master_sync(dc_state);
 		mutex_lock(&dm->dc_lock);