author	Vladimir Davydov <vdavydov@parallels.com>	2015-02-12 14:59:38 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 18:54:10 -0800
commit	2788cf0c401c268b4819c5407493a8769b7007aa (patch)
tree	863ea244d6908bd6e8149e6cd81270389a9426a8 /mm/list_lru.c
parent	3f97b163207c67a3b35931494ad3db1de66356f0 (diff)
memcg: reparent list_lrus and free kmemcg_id on css offline
Now the only reason to keep kmemcg_id until css free is list_lru, which
uses it to distribute elements between per-memcg lists. However, this is
easily sorted out: we only need to change the kmemcg_id of an offline
cgroup to its parent's id, making further list_lru_add() calls add
elements to the parent's list, and then move all elements from the
offline cgroup's list to the parent's list. This works because a racing
list_lru_del() does not need to know which list it is deleting the
element from; it may decrement the wrong nr_items counter, but the
ongoing reparenting will fix that up. Once list_lru reparenting is done,
we are free to release the kmemcg_id, saving a valuable slot in the
per-memcg arrays for new cgroups.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
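The reparenting described in the changelog boils down to a single list
splice plus a counter transfer under the per-node lock. Below is a
minimal, self-contained user-space sketch of that operation (an
illustration only, not kernel code: the list helpers are reimplemented
here, and a pthread mutex stands in for the IRQ-safe nlru->lock; the
names merely mirror the patch).

/* reparent_sketch.c - user-space illustration of the drain operation. */
#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void init_list(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

/* Move every entry of @from to the tail of @to, leaving @from empty. */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
	if (from->next == from)		/* nothing to move */
		return;
	from->next->prev = to->prev;
	to->prev->next = from->next;
	from->prev->next = to;
	to->prev = from->prev;
	init_list(from);
}

/* A per-memcg LRU list plus its element count, like struct list_lru_one. */
struct lru_one { struct list_head list; long nr_items; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Core of memcg_drain_list_lru_node(): one splice, counters summed. */
static void drain(struct lru_one *src, struct lru_one *dst)
{
	pthread_mutex_lock(&lock);	/* spin_lock_irq(&nlru->lock) upstream */
	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct lru_one child = { .nr_items = 0 }, parent = { .nr_items = 0 };
	struct list_head items[3];

	init_list(&child.list);
	init_list(&parent.list);
	for (int i = 0; i < 3; i++) {	/* list_lru_add() to the child */
		list_add_tail(&items[i], &child.list);
		child.nr_items++;
	}

	drain(&child, &parent);		/* reparent: the child goes offline */
	printf("child=%ld parent=%ld\n", child.nr_items, parent.nr_items);
	return 0;
}

Compiled with cc -pthread, this prints child=0 parent=3: all elements
and the count end up on the parent, exactly the invariant the patch
relies on before freeing the child's kmemcg_id.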
Diffstat (limited to 'mm/list_lru.c')
-rw-r--r--	mm/list_lru.c	46
1 file changed, 43 insertions, 3 deletions
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 8d9d168c6c38..909eca2c820e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -100,7 +100,6 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 
 	spin_lock(&nlru->lock);
 	l = list_lru_from_kmem(nlru, item);
-	WARN_ON_ONCE(l->nr_items < 0);
 	if (list_empty(item)) {
 		list_add_tail(item, &l->list);
 		l->nr_items++;
@@ -123,7 +122,6 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 	if (!list_empty(item)) {
 		list_del_init(item);
 		l->nr_items--;
-		WARN_ON_ONCE(l->nr_items < 0);
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -156,7 +154,6 @@ static unsigned long __list_lru_count_one(struct list_lru *lru,
 
 	spin_lock(&nlru->lock);
 	l = list_lru_from_memcg_idx(nlru, memcg_idx);
-	WARN_ON_ONCE(l->nr_items < 0);
 	count = l->nr_items;
 	spin_unlock(&nlru->lock);
 
@@ -458,6 +455,49 @@ fail:
 	memcg_cancel_update_list_lru(lru, old_size, new_size);
 	goto out;
 }
+
+static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
+				      int src_idx, int dst_idx)
+{
+	struct list_lru_one *src, *dst;
+
+	/*
+	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
+	 * we have to use IRQ-safe primitives here to avoid deadlock.
+	 */
+	spin_lock_irq(&nlru->lock);
+
+	src = list_lru_from_memcg_idx(nlru, src_idx);
+	dst = list_lru_from_memcg_idx(nlru, dst_idx);
+
+	list_splice_init(&src->list, &dst->list);
+	dst->nr_items += src->nr_items;
+	src->nr_items = 0;
+
+	spin_unlock_irq(&nlru->lock);
+}
+
+static void memcg_drain_list_lru(struct list_lru *lru,
+				 int src_idx, int dst_idx)
+{
+	int i;
+
+	if (!list_lru_memcg_aware(lru))
+		return;
+
+	for (i = 0; i < nr_node_ids; i++)
+		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
+}
+
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
+{
+	struct list_lru *lru;
+
+	mutex_lock(&list_lrus_mutex);
+	list_for_each_entry(lru, &list_lrus, list)
+		memcg_drain_list_lru(lru, src_idx, dst_idx);
+	mutex_unlock(&list_lrus_mutex);
+}
 #else
 static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
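This diffstat is limited to mm/list_lru.c; the caller of
memcg_drain_all_list_lrus() lives on the memcontrol.c side of the same
commit and is not shown here. A hedged sketch of how the css-offline
path presumably wires it up, per the changelog ('memcg' and 'parent'
are illustrative names, not quoted from the patch): the cgroup's
kmemcg_id is switched to the parent's id first, so new list_lru_add()
calls already land on the parent, and only then are the queued elements
drained across and the id released.

	/* Hedged reconstruction of the memcontrol.c side, not patch text. */
	int kmemcg_id = memcg->kmemcg_id;	/* the slot being given up */
	int parent_id = memcg_cache_id(parent);	/* destination lists */

	/*
	 * Redirect future list_lru_add() calls before draining, so nothing
	 * new lands on the source lists while they are being spliced away.
	 */
	memcg->kmemcg_id = parent_id;
	memcg_drain_all_list_lrus(kmemcg_id, parent_id);
	memcg_free_cache_id(kmemcg_id);		/* slot reusable by new cgroups */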