author    John Harrison <John.C.Harrison@Intel.com>  2015-05-29 17:43:32 +0100
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2015-06-23 14:02:03 +0200
commit    535fbe8233d164e76bac515dd7efee699093cea9 (patch)
tree      93348073d5d321d8d5c6f9a24f45374cdce5c8d6 /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent    95c24161cd8561bd2f866a802a44b28fd0a867b7 (diff)
drm/i915: Update move_to_gpu() to take a request structure
The plan is to pass requests around as the basic submission tracking
structure rather than rings and contexts. This patch updates the
move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
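The shape of the conversion is visible in the hunks below: every spot that used the old ring parameter now reaches through req->ring instead, and the caller hands over params->request rather than the engine. As a rough illustration of that pattern, here is a minimal, self-contained C sketch; the structure layouts, the move_to_gpu_old()/move_to_gpu_new() helpers and the main() driver are simplified stand-ins for this example, not the real i915 definitions.

#include <stdio.h>

/* Simplified stand-ins for the i915 structures touched by this patch;
 * names and layouts are illustrative only. */
struct intel_engine_cs {
	const char *name;
};

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;	/* engine this request is queued on */
};

/* Old style: the caller passes the engine explicitly. */
static int move_to_gpu_old(struct intel_engine_cs *ring)
{
	printf("flushing and syncing on %s\n", ring->name);
	return 0;
}

/* New style: the request is the single submission handle, and the
 * engine is recovered from it wherever it is still needed. */
static int move_to_gpu_new(struct drm_i915_gem_request *req)
{
	return move_to_gpu_old(req->ring);
}

int main(void)
{
	struct intel_engine_cs rcs = { .name = "render ring" };
	struct drm_i915_gem_request req = { .ring = &rcs };

	return move_to_gpu_new(&req);
}

Threading one request pointer through the call chain mirrors the stated plan of making the request the basic submission tracking structure, rather than passing rings and contexts separately through every signature.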
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 610c3307a02a..f6ce811a024a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -891,10 +891,10 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(ring);
+ const unsigned other_rings = ~intel_ring_flag(req->ring);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -904,7 +904,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring);
if (ret)
return ret;
}
@@ -916,7 +916,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
}
if (flush_chipset)
- i915_gem_chipset_flush(ring->dev);
+ i915_gem_chipset_flush(req->ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -924,7 +924,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(req->ring);
}
static bool
@@ -1246,7 +1246,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;