author     Chris Wilson <chris@chris-wilson.co.uk>  2019-05-30 21:35:00 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-05-31 21:23:51 +0100
commit     d82b4b26218d359eeba3f401c9fc649388641b1a (patch)
tree       0a13b3cd7eef299a0812270400843d7b94ec2389 /drivers/gpu/drm/i915/gem
parent     3b4fa9640ccded07fff6d563d3ac1b2f3f111d97 (diff)
drm/i915: Report all objects with allocated pages to the shrinker
Currently, we try to report to the shrinker the precise number of
objects (pages) that are available to be reaped at this moment. This
requires searching all objects with allocated pages to see if they
fulfill the search criteria, and this count is performed quite
frequently. (The shrinker tries to free ~128 pages on each invocation,
before which we count all the objects; counting takes longer than
unbinding the objects!) If we take the pragmatic view that, with
sufficient desire, all objects are eventually reapable (they become
inactive, or are no longer used as a framebuffer, etc.), we can simply
return the running count of allocated pages maintained during
get_pages/put_pages rather than walk the lists every time.
The downside is that we may (slightly) over-report the number of
objects/pages we could shrink and so penalize ourselves by shrinking
more than required. This is mitigated by preserving the order in which
we shrink objects, so that active and frequently used objects are
penalized last; and if memory is so tight that we do need to free them,
we would have had to free them anyway.
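
[Editor's note: in outline, the replacement bookkeeping is just a counter and
a byte total adjusted under a spinlock wherever backing pages are attached or
detached, then read locklessly when the shrinker asks. A minimal standalone
sketch of that idea, borrowing the shrink_count/shrink_memory names from the
patch below; the shrink_stats structure and helpers are hypothetical, not the
real i915 types.]

#include <linux/spinlock.h>
#include <linux/mm.h>

/* Sketch only: mirrors the patch's bookkeeping, not the real i915 types. */
struct shrink_stats {
	spinlock_t lock;
	unsigned long shrink_count;	/* objects whose pages could be reaped */
	u64 shrink_memory;		/* bytes of backing store behind them */
};

/* Called from the get_pages path once backing pages exist. */
static void shrink_stats_add(struct shrink_stats *s, u64 size)
{
	spin_lock(&s->lock);
	s->shrink_count++;
	s->shrink_memory += size;
	spin_unlock(&s->lock);
}

/* Called from the put_pages path as the pages are released. */
static void shrink_stats_remove(struct shrink_stats *s, u64 size)
{
	spin_lock(&s->lock);
	s->shrink_count--;
	s->shrink_memory -= size;
	spin_unlock(&s->lock);
}

/*
 * The shrinker side reads without taking the lock; a marginally stale
 * answer is fine because the count is only ever treated as an estimate.
 */
static unsigned long shrink_stats_pages(const struct shrink_stats *s)
{
	return READ_ONCE(s->shrink_memory) >> PAGE_SHIFT;
}
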
v2: Only expose shrinkable objects to the shrinker; a small reduction
from no longer considering stolen and foreign objects.
v3: Restore the tracking from a "backup" copy made before the gem/ split
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190530203500.26272-2-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_domain.c   |  3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c   | 33
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c    | 20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c   |  2
5 files changed, 31 insertions(+), 55 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 52b73e90c9f4..e5deae62681f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -475,7 +475,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
-	if (obj->mm.madv == I915_MADV_WILLNEED) {
+	if (i915_gem_object_is_shrinkable(obj) &&
+	    obj->mm.madv == I915_MADV_WILLNEED) {
 		struct list_head *list;
 
 		spin_lock(&i915->mm.obj_lock);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1ec60be06755..b840cf179bbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -44,25 +44,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 	return kmem_cache_free(global.slab_objects, obj);
 }
 
-/* some bookkeeping */
-static void i915_gem_info_add_obj(struct drm_i915_private *i915,
-				  u64 size)
-{
-	spin_lock(&i915->mm.object_stat_lock);
-	i915->mm.object_count++;
-	i915->mm.object_memory += size;
-	spin_unlock(&i915->mm.object_stat_lock);
-}
-
-static void i915_gem_info_remove_obj(struct drm_i915_private *i915,
-				     u64 size)
-{
-	spin_lock(&i915->mm.object_stat_lock);
-	i915->mm.object_count--;
-	i915->mm.object_memory -= size;
-	spin_unlock(&i915->mm.object_stat_lock);
-}
-
 static void
 frontbuffer_retire(struct i915_active_request *active,
 		   struct i915_request *request)
@@ -98,8 +79,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
-
-	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 /**
@@ -163,11 +142,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
 {
-	/* If we are the last user of the backing storage (be it shmemfs
+	/*
+	 * If we are the last user of the backing storage (be it shmemfs
 	 * pages or stolen etc), we know that the pages are going to be
 	 * immediately released. In this case, we can then skip copying
 	 * back the contents from the GPU.
 	 */
+	if (!i915_gem_object_is_shrinkable(obj))
+		return false;
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return false;
 
@@ -208,13 +190,15 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma.list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
 
-		/* This serializes freeing with the shrinker. Since the free
+		/*
+		 * This serializes freeing with the shrinker. Since the free
 		 * is delayed, first by RCU then by the workqueue, we want the
 		 * shrinker to be able to free pages of unreferenced objects,
 		 * or else we may oom whilst there are plenty of deferred
 		 * freed objects.
 		 */
-		if (i915_gem_object_has_pages(obj)) {
+		if (i915_gem_object_has_pages(obj) &&
+		    i915_gem_object_is_shrinkable(obj)) {
 			spin_lock(&i915->mm.obj_lock);
 			list_del_init(&obj->mm.link);
 			spin_unlock(&i915->mm.obj_lock);
@@ -240,7 +224,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		reservation_object_fini(&obj->__builtin_resv);
 		drm_gem_object_release(&obj->base);
-		i915_gem_info_remove_obj(i915, obj->base.size);
 
 		bitmap_free(obj->bit_17);
 		i915_gem_object_free(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e53860147f21..7e64fd6bc19b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -56,9 +56,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	}
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
 
-	spin_lock(&i915->mm.obj_lock);
-	list_add(&obj->mm.link, &i915->mm.unbound_list);
-	spin_unlock(&i915->mm.obj_lock);
+	if (i915_gem_object_is_shrinkable(obj)) {
+		spin_lock(&i915->mm.obj_lock);
+		i915->mm.shrink_count++;
+		i915->mm.shrink_memory += obj->base.size;
+		list_add(&obj->mm.link, &i915->mm.unbound_list);
+		spin_unlock(&i915->mm.obj_lock);
+	}
 }
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -146,9 +150,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
 
-	spin_lock(&i915->mm.obj_lock);
-	list_del(&obj->mm.link);
-	spin_unlock(&i915->mm.obj_lock);
+	if (i915_gem_object_is_shrinkable(obj)) {
+		spin_lock(&i915->mm.obj_lock);
+		list_del(&obj->mm.link);
+		i915->mm.shrink_count--;
+		i915->mm.shrink_memory -= obj->base.size;
+		spin_unlock(&i915->mm.obj_lock);
+	}
 
 	if (obj->mm.mapping) {
 		void *ptr;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 6a93e326abf3..d71e630c6fb8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -309,30 +309,14 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
-	struct drm_i915_gem_object *obj;
-	unsigned long num_objects = 0;
-	unsigned long count = 0;
+	unsigned long num_objects;
+	unsigned long count;
 
-	spin_lock(&i915->mm.obj_lock);
-	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
-		if (can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
+	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
+	num_objects = READ_ONCE(i915->mm.shrink_count);
 
-	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
-	list_for_each_entry(obj, &i915->mm.purge_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
-			count += obj->base.size >> PAGE_SHIFT;
-			num_objects++;
-		}
-	spin_unlock(&i915->mm.obj_lock);
-
-	/* Update our preferred vmscan batch size for the next pass.
+	/*
+	 * Update our preferred vmscan batch size for the next pass.
 	 * Our rough guess for an effective batch size is roughly 2
 	 * available GEM objects worth of pages. That is we don't want
 	 * the shrinker to fire, until it is worth the cost of freeing an
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 9080a736663a..84d4f549eb21 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -690,7 +690,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	mutex_unlock(&ggtt->vm.mutex);
 
 	spin_lock(&dev_priv->mm.obj_lock);
-	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
+	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
 	obj->bind_count++;
 	spin_unlock(&dev_priv->mm.obj_lock);
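
[Editor's note: for context on why the O(1) conversion in i915_gem_shrinker_count()
is safe, a shrinker's count_objects() callback is invoked on every vmscan pass and
is only used to size the scan that follows, so it is expected to be cheap and is
allowed to be approximate. Below is a hedged, self-contained sketch of the callback
pattern under the single-argument register_shrinker() of 2019-era kernels; every
example_* name is hypothetical, only the shrinker API itself is real.]

#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/init.h>

/* Hypothetical running total of reclaimable pages, maintained at page
 * attach/detach time in the same spirit as the patch above. */
static atomic_long_t example_shrink_pages;

static unsigned long example_count(struct shrinker *shrinker,
				   struct shrink_control *sc)
{
	/* Cheap, lock-free estimate; returning 0 skips the scan. */
	return atomic_long_read(&example_shrink_pages);
}

static unsigned long example_scan(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	/* A real driver frees up to sc->nr_to_scan pages here and returns
	 * the number freed; SHRINK_STOP reports no forward progress. */
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects = example_count,
	.scan_objects  = example_scan,
	.seeks         = DEFAULT_SEEKS,
	.batch         = 128,	/* default SHRINK_BATCH; i915 retunes its batch each pass */
};

static int __init example_init(void)
{
	return register_shrinker(&example_shrinker);
}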