author     Chris Wilson <chris@chris-wilson.co.uk>    2015-04-07 16:20:31 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2015-04-10 08:56:02 +0200
commit     6ad790c0f5ac55fd13f322c23519f0d6f0721864 (patch)
tree       766a798328c05a8daefb4948a17316683c4c507c /drivers/gpu/drm/i915/intel_pm.c
parent     edcf284bfe9c94eab2923b13cfff7456c0dc7dc6 (diff)
drm/i915: Boost GPU frequency if we detect outstanding pageflips
If we hit a vblank and see that we have a pageflip queued but not yet processed, ensure that the GPU is running at maximum in order to clear the backlog. Pageflips are only queued for the following vblank; if we miss it, there will be a visible stutter. Boosting the GPU frequency doesn't prevent us from missing the target vblank, but it should help the subsequent frames hit theirs.

v2: Reorder vblank vs. flip-complete so that we only check for a missed flip after processing the completion events, and avoid spurious boosts.
v3: Rename missed_vblank
v4: Rebase
v5: Cancel the outstanding work in runtime suspend
v6: Rebase
v7: Rebase required fixing

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Deepak S <deepak.s@linux.intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
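The diff below only covers the boost helper added to intel_pm.c; the caller that notices the stalled flip sits in the page-flip/vblank handling code in intel_display.c, outside this diffstat. A rough sketch of how such a caller might look is shown here; the function name and the unpin_work fields used are assumptions for illustration, not taken from this patch:

/*
 * Hypothetical caller sketch (not part of this hunk): invoked from the
 * vblank handling path after flip-completion events have been processed.
 * The function name and the unpin_work fields are illustrative only.
 */
static void intel_boost_if_flip_missed(struct drm_device *dev,
				       struct intel_crtc *crtc)
{
	struct intel_unpin_work *work = crtc->unpin_work;

	/*
	 * A flip was queued for an earlier vblank but still has not
	 * completed, so the next frame is likely to stutter as well.
	 * Ask the GPU to clock up to clear the backlog; the helper takes
	 * its own reference on the request and defers the actual boost to
	 * the driver workqueue, so it is safe to call from atomic context.
	 */
	if (work != NULL && work->flip_queued_req != NULL)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
}

As the hunk below shows, the helper allocates its work item with GFP_ATOMIC and references the request before queueing, so the request cannot be freed while the boost work is pending; the worker applies the boost only if the request is still incomplete, then drops the reference and frees the work item.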
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--    drivers/gpu/drm/i915/intel_pm.c    35
1 file changed, 35 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9c705dec853e..acf1a318fda9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6772,6 +6772,41 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return val / GT_FREQUENCY_MULTIPLIER;
}
+struct request_boost {
+ struct work_struct work;
+ struct drm_i915_gem_request *rq;
+};
+
+static void __intel_rps_boost_work(struct work_struct *work)
+{
+ struct request_boost *boost = container_of(work, struct request_boost, work);
+
+ if (!i915_gem_request_completed(boost->rq, true))
+ gen6_rps_boost(to_i915(boost->rq->ring->dev));
+
+ i915_gem_request_unreference__unlocked(boost->rq);
+ kfree(boost);
+}
+
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+ struct drm_i915_gem_request *rq)
+{
+ struct request_boost *boost;
+
+ if (rq == NULL || INTEL_INFO(dev)->gen < 6)
+ return;
+
+ boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
+ if (boost == NULL)
+ return;
+
+ i915_gem_request_reference(rq);
+ boost->rq = rq;
+
+ INIT_WORK(&boost->work, __intel_rps_boost_work);
+ queue_work(to_i915(dev)->wq, &boost->work);
+}
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;