author    | Chris Wilson <chris@chris-wilson.co.uk> | 2015-04-07 16:20:35 +0100
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-04-10 08:56:04 +0200
commit    | de4e783a3f86f63b03303b463cd7ef885e14b476 (patch)
tree      | 1f5c9e82c88bad8eafb368b0021860301147d00a /drivers/gpu/drm/i915/i915_gem_batch_pool.c
parent    | ed9ddd25b2055d29e2106da2a6340e2614d71e86 (diff)
drm/i915: Tidy batch pool logic
Move the madvise logic out of the execbuffer main path into the
relatively rare allocation path, making the execbuffer manipulation less
fragile.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
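
The new contract is easiest to see from a caller's perspective. Below is a minimal sketch of a hypothetical caller (not part of this patch), based on the updated kernel-doc: the pool now hands back an object with its pages already pinned and its madvise state already set, so the only per-use bookkeeping left on the execbuffer side is to unpin the pages when done.

	struct drm_i915_gem_object *obj;

	/* struct_mutex must already be held, as the pool requires */
	obj = i915_gem_batch_pool_get(pool, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* ... write commands into obj and submit it ... */

	/* drop the pin taken by i915_gem_batch_pool_get() */
	i915_gem_object_unpin_pages(obj);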
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_batch_pool.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_batch_pool.c | 39
1 file changed, 19 insertions, 20 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 564be7c5ea7e..21f3356cc0ab 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -67,25 +67,23 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 					 struct drm_i915_gem_object,
 					 batch_pool_list);
 
-		WARN_ON(obj->active);
-
-		list_del_init(&obj->batch_pool_list);
+		list_del(&obj->batch_pool_list);
 		drm_gem_object_unreference(&obj->base);
 	}
 }
 
 /**
- * i915_gem_batch_pool_get() - select a buffer from the pool
+ * i915_gem_batch_pool_get() - allocate a buffer from the pool
  * @pool: the batch buffer pool
  * @size: the minimum desired size of the returned buffer
  *
- * Finds or allocates a batch buffer in the pool with at least the requested
- * size. The caller is responsible for any domain, active/inactive, or
- * purgeability management for the returned buffer.
+ * Returns an inactive buffer from @pool with at least @size bytes,
+ * with the pages pinned. The caller must i915_gem_object_unpin_pages()
+ * on the returned object.
  *
  * Note: Callers must hold the struct_mutex
  *
- * Return: the selected batch buffer object
+ * Return: the buffer object or an error pointer
  */
 struct drm_i915_gem_object *
 i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
@@ -97,8 +95,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
 
 	list_for_each_entry_safe(tmp, next,
-			&pool->cache_list, batch_pool_list) {
-
+				 &pool->cache_list, batch_pool_list) {
 		if (tmp->active)
 			continue;
 
@@ -114,25 +111,27 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		 * but not 'too much' bigger. A better way to do this
 		 * might be to bucket the pool objects based on size.
 		 */
-		if (tmp->base.size >= size &&
-		    tmp->base.size <= (2 * size)) {
+		if (tmp->base.size >= size && tmp->base.size <= 2 * size) {
 			obj = tmp;
 			break;
 		}
 	}
 
-	if (!obj) {
+	if (obj == NULL) {
+		int ret;
+
 		obj = i915_gem_alloc_object(pool->dev, size);
-		if (!obj)
+		if (obj == NULL)
 			return ERR_PTR(-ENOMEM);
 
-		list_add_tail(&obj->batch_pool_list, &pool->cache_list);
-	}
-	else
-		/* Keep list in LRU order */
-		list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+		ret = i915_gem_object_get_pages(obj);
+		if (ret)
+			return ERR_PTR(ret);
 
-	obj->madv = I915_MADV_WILLNEED;
+		obj->madv = I915_MADV_DONTNEED;
+	}
+	list_move_tail(&obj->batch_pool_list, &pool->cache_list);
+	i915_gem_object_pin_pages(obj);
 
 	return obj;
 }
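
For reference, this is how the allocation path reads with the patch applied, reassembled from the hunks above (whitespace approximate; the comments are added here for illustration and are not in the patch). The madvise write now happens exactly once, when a pool object is first created, instead of on every execbuffer:

	if (obj == NULL) {
		int ret;

		obj = i915_gem_alloc_object(pool->dev, size);
		if (obj == NULL)
			return ERR_PTR(-ENOMEM);

		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ERR_PTR(ret);

		/* Set once at allocation: pool objects idle as purgeable
		 * (DONTNEED); the pin taken below keeps the pages resident
		 * while the buffer is handed out.
		 */
		obj->madv = I915_MADV_DONTNEED;
	}
	/* every hit refreshes the LRU order and pins the pages */
	list_move_tail(&obj->batch_pool_list, &pool->cache_list);
	i915_gem_object_pin_pages(obj);

	return obj;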