author     Vladimir Davydov <vdavydov@parallels.com>        2015-02-12 14:59:32 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-02-12 18:54:10 -0800
commit     2a4db7eb9391a544ff58f4fa11d35246e87c87af (patch)
tree       3bbd57297a8303ffa227d6ea5600d2593a0302f4 /mm
parent     f1008365bbe4931d6a94dcfc11cf4cdada359664 (diff)
memcg: free memcg_caches slot on css offline
We need to look up a kmem_cache in ->memcg_params.memcg_caches arrays only
on allocations, so there is no need to have the array entries set until
css free - we can clear them on css offline. This will allow us to reuse
array entries more efficiently and avoid costly array relocations.
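Restated: allocation is the only consumer of a root cache's per-memcg array, so a cgroup's slot can be emptied as soon as the cgroup goes offline and later handed to a new cgroup instead of growing (and relocating) the array. Below is a minimal standalone sketch of that idea in userspace C; it is not the kernel code, and root_cache, memcg_clone, cache_for_alloc, deactivate_cgroup and the fixed bound are all invented for the example.

/*
 * Sketch only: each root cache keeps an array of per-cgroup clones indexed
 * by a small integer id.  Allocation consults the array; when a cgroup goes
 * offline its slot is cleared, so the id (and the slot) can be reused.
 */
#include <stddef.h>
#include <stdio.h>

#define MAX_CGROUPS 4                       /* illustrative fixed bound */

struct root_cache {
	const char *name;
	/* one clone per cgroup id; NULL means "fall back to the root cache" */
	const char *memcg_clone[MAX_CGROUPS];
};

/* pick the cache to allocate from: the clone if present, the root otherwise */
static const char *cache_for_alloc(struct root_cache *rc, int id)
{
	if (id >= 0 && rc->memcg_clone[id])
		return rc->memcg_clone[id];
	return rc->name;
}

/* offline: clear the slot in every root cache so the id can be handed out again */
static void deactivate_cgroup(struct root_cache **caches, size_t n, int id)
{
	for (size_t i = 0; i < n; i++)
		caches[i]->memcg_clone[id] = NULL;
}

int main(void)
{
	struct root_cache dentry = { .name = "dentry" };
	struct root_cache *all[] = { &dentry };

	dentry.memcg_clone[0] = "dentry(cg1)";        /* cgroup 1 was given id 0 */
	printf("%s\n", cache_for_alloc(&dentry, 0));   /* uses the clone */

	deactivate_cgroup(all, 1, 0);                  /* cg1 goes offline */
	printf("%s\n", cache_for_alloc(&dentry, 0));   /* back to the root; slot reusable */
	return 0;
}

In the kernel the array is kmem_cache->memcg_params.memcg_caches and the index is memcg->kmemcg_id; the sketch only models the "clear on offline, reuse the index" part that this patch introduces.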
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c   | 38
-rw-r--r--  mm/slab_common.c  | 39
2 files changed, 60 insertions(+), 17 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f3c0fcd7a2d..abfe0135bfdc 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -334,6 +334,7 @@ struct mem_cgroup {
 #if defined(CONFIG_MEMCG_KMEM)
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
+	bool kmem_acct_active;
 #endif
 
 	int last_scanned_node;
@@ -354,7 +355,7 @@ struct mem_cgroup {
 #ifdef CONFIG_MEMCG_KMEM
 bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 {
-	return memcg->kmemcg_id >= 0;
+	return memcg->kmem_acct_active;
 }
 #endif
 
@@ -585,7 +586,7 @@ static void memcg_free_cache_id(int id);
 
 static void disarm_kmem_keys(struct mem_cgroup *memcg)
 {
-	if (memcg_kmem_is_active(memcg)) {
+	if (memcg->kmemcg_id >= 0) {
 		static_key_slow_dec(&memcg_kmem_enabled_key);
 		memcg_free_cache_id(memcg->kmemcg_id);
 	}
@@ -2666,6 +2667,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;
+	int kmemcg_id;
 
 	VM_BUG_ON(!is_root_cache(cachep));
 
@@ -2673,10 +2675,11 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 		return cachep;
 
 	memcg = get_mem_cgroup_from_mm(current->mm);
-	if (!memcg_kmem_is_active(memcg))
+	kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+	if (kmemcg_id < 0)
 		goto out;
 
-	memcg_cachep = cache_from_memcg_idx(cachep, memcg_cache_id(memcg));
+	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
 	if (likely(memcg_cachep))
 		return memcg_cachep;
 
@@ -3318,8 +3321,8 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
 	int err = 0;
 	int memcg_id;
 
-	if (memcg_kmem_is_active(memcg))
-		return 0;
+	BUG_ON(memcg->kmemcg_id >= 0);
+	BUG_ON(memcg->kmem_acct_active);
 
 	/*
 	 * For simplicity, we won't allow this to be disabled.  It also can't
@@ -3362,6 +3365,7 @@ static int memcg_activate_kmem(struct mem_cgroup *memcg,
 	 * patched.
 	 */
 	memcg->kmemcg_id = memcg_id;
+	memcg->kmem_acct_active = true;
 out:
 	return err;
 }
@@ -4041,6 +4045,22 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return mem_cgroup_sockets_init(memcg, ss);
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+	if (!memcg->kmem_acct_active)
+		return;
+
+	/*
+	 * Clear the 'active' flag before clearing memcg_caches arrays entries.
+	 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
+	 * guarantees no cache will be created for this cgroup after we are
+	 * done (see memcg_create_kmem_cache()).
+	 */
+	memcg->kmem_acct_active = false;
+
+	memcg_deactivate_kmem_caches(memcg);
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 	memcg_destroy_kmem_caches(memcg);
@@ -4052,6 +4072,10 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 	return 0;
 }
 
+static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
+{
+}
+
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 }
@@ -4608,6 +4632,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 	spin_unlock(&memcg->event_list_lock);
 
 	vmpressure_cleanup(&memcg->vmpressure);
+
+	memcg_deactivate_kmem(memcg);
 }
 
 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 6087b1f9a385..0873bcc61c7a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -440,18 +440,8 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
 		*need_rcu_barrier = true;
 
 #ifdef CONFIG_MEMCG_KMEM
-	if (!is_root_cache(s)) {
-		int idx;
-		struct memcg_cache_array *arr;
-
-		idx = memcg_cache_id(s->memcg_params.memcg);
-		arr = rcu_dereference_protected(s->memcg_params.root_cache->
-						memcg_params.memcg_caches,
-						lockdep_is_held(&slab_mutex));
-		BUG_ON(arr->entries[idx] != s);
-		arr->entries[idx] = NULL;
+	if (!is_root_cache(s))
 		list_del(&s->memcg_params.list);
-	}
 #endif
 	list_move(&s->list, release);
 	return 0;
@@ -499,6 +489,13 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
 
 	mutex_lock(&slab_mutex);
 
+	/*
+	 * The memory cgroup could have been deactivated while the cache
+	 * creation work was pending.
+	 */
+	if (!memcg_kmem_is_active(memcg))
+		goto out_unlock;
+
 	idx = memcg_cache_id(memcg);
 	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
 					lockdep_is_held(&slab_mutex));
@@ -548,6 +545,26 @@ out_unlock:
 	put_online_cpus();
 }
 
+void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
+{
+	int idx;
+	struct memcg_cache_array *arr;
+	struct kmem_cache *s;
+
+	idx = memcg_cache_id(memcg);
+
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		if (!is_root_cache(s))
+			continue;
+
+		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
+						lockdep_is_held(&slab_mutex));
+		arr->entries[idx] = NULL;
+	}
+	mutex_unlock(&slab_mutex);
+}
+
 void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 {
 	LIST_HEAD(release);
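The ordering that the new memcg_deactivate_kmem() relies on (clear the active flag, then empty the array slots under slab_mutex, while the deferred cache-creation work re-checks the flag under the same mutex) can be modelled in a few lines. The following pthreads sketch is an analogy under stated assumptions, not kernel code; slab_lock, create_clone, deactivate and the single slot all stand in for the kernel objects above.

/*
 * Sketch of the offline vs. deferred-creation handshake: creation installs a
 * clone only if the flag is still set, deactivation clears the flag and then
 * takes the same lock before emptying the slot.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t slab_lock = PTHREAD_MUTEX_INITIALIZER;
static bool kmem_acct_active = true;   /* analogue of memcg->kmem_acct_active */
static const char *slot;               /* analogue of one memcg_caches entry */

/* deferred cache-creation work (memcg_create_kmem_cache() analogue) */
static void *create_clone(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&slab_lock);
	if (kmem_acct_active)          /* bail out if the cgroup went offline */
		slot = "clone";
	pthread_mutex_unlock(&slab_lock);
	return NULL;
}

/* css offline path (memcg_deactivate_kmem() analogue) */
static void deactivate(void)
{
	kmem_acct_active = false;      /* 1. refuse new clones                */
	pthread_mutex_lock(&slab_lock);/* 2. wait out any in-flight creation  */
	slot = NULL;                   /* 3. empty the slot so it can be reused */
	pthread_mutex_unlock(&slab_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, create_clone, NULL);
	pthread_join(t, NULL);
	deactivate();
	printf("slot after offline: %s\n", slot ? slot : "(empty)");
	return 0;
}

The point of the ordering is that once deactivation has taken and released the lock, any later creation work observes the cleared flag and bails out, so no clone can be installed into a slot that has already been freed for reuse by another cgroup.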