| author | Hugh Dickins <hugh@veritas.com> | 2008-03-04 14:29:08 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-03-04 16:35:15 -0800 |
| commit | 7e924aafa4b03ff71de34af8553d9a1ebc86c071 (patch) | |
| tree | 21735b7369ede27f34c41184b0a686020b80e2c6 /mm/memcontrol.c | |
| parent | 9442ec9df40d952b0de185ae5638a74970388e01 (diff) | |
memcg: mem_cgroup_charge never NULL
My memcgroup patch to fix the hang with shmem/tmpfs added NULL page handling to
mem_cgroup_charge_common. It seemed convenient at the time, but it is hard to
justify now: there is a perfectly appropriate swappage to charge and uncharge
instead; this is not on any hot path through shmem_getpage, and no performance
hit was observed from the slight extra overhead.
So revert that NULL page handling from mem_cgroup_charge_common, and make the
function clearer by bringing page_cgroup_assign_new_page_cgroup into its body:
that was a helper I found more of a hindrance than a help to understanding.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 61
1 file changed, 21 insertions(+), 40 deletions(-)
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9e170d3c71e5..83ba13ad31e1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -301,25 +301,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
 }
 
 /*
- * Tie new page_cgroup to struct page under lock_page_cgroup()
- * This can fail if the page has been tied to a page_cgroup.
- * If success, returns 0.
- */
-static int page_cgroup_assign_new_page_cgroup(struct page *page,
-						struct page_cgroup *pc)
-{
-	int ret = 0;
-
-	lock_page_cgroup(page);
-	if (!page_get_page_cgroup(page))
-		page_assign_page_cgroup(page, pc);
-	else /* A page is tied to other pc. */
-		ret = 1;
-	unlock_page_cgroup(page);
-	return ret;
-}
-
-/*
  * Clear page->page_cgroup member under lock_page_cgroup().
  * If given "pc" value is different from one page->page_cgroup,
  * page->cgroup is not cleared.
@@ -585,26 +566,24 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	 * with it
 	 */
 retry:
-	if (page) {
-		lock_page_cgroup(page);
-		pc = page_get_page_cgroup(page);
-		/*
-		 * The page_cgroup exists and
-		 * the page has already been accounted.
-		 */
-		if (pc) {
-			if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
-				/* this page is under being uncharged ? */
-				unlock_page_cgroup(page);
-				cpu_relax();
-				goto retry;
-			} else {
-				unlock_page_cgroup(page);
-				goto done;
-			}
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
+	/*
+	 * The page_cgroup exists and
+	 * the page has already been accounted.
+	 */
+	if (pc) {
+		if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+			/* this page is under being uncharged ? */
+			unlock_page_cgroup(page);
+			cpu_relax();
+			goto retry;
+		} else {
+			unlock_page_cgroup(page);
+			goto done;
 		}
-		unlock_page_cgroup(page);
 	}
+	unlock_page_cgroup(page);
 
 	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
 	if (pc == NULL)
@@ -663,7 +642,9 @@ retry:
 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
 		pc->flags |= PAGE_CGROUP_FLAG_CACHE;
 
-	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+	lock_page_cgroup(page);
+	if (page_get_page_cgroup(page)) {
+		unlock_page_cgroup(page);
 		/*
 		 * Another charge has been added to this page already.
 		 * We take lock_page_cgroup(page) again and read
@@ -672,10 +653,10 @@ retry:
 		res_counter_uncharge(&mem->res, PAGE_SIZE);
 		css_put(&mem->css);
 		kfree(pc);
-		if (!page)
-			goto done;
 		goto retry;
 	}
+	page_assign_page_cgroup(page, pc);
+	unlock_page_cgroup(page);
 
 	mz = page_cgroup_zoneinfo(pc);
 	spin_lock_irqsave(&mz->lru_lock, flags);
```
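The heart of the patched mem_cgroup_charge_common() is a classic concurrency pattern: look for an existing page_cgroup under the lock, otherwise allocate one optimistically outside the lock, then recheck and publish under the lock, backing out and retrying if another task won the race. Below is a minimal userspace sketch of that shape, with a pthread mutex standing in for lock_page_cgroup() and C11 atomics for atomic_inc_not_zero(); the res_counter, css and LRU bookkeeping are omitted, so this illustrates the locking pattern only and is not kernel code:

```c
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct page_cgroup {
	atomic_int ref_cnt;
};

struct page {
	pthread_mutex_t lock;	/* stands in for lock_page_cgroup() */
	struct page_cgroup *pc;	/* protected by lock */
};

/* C11 analogue of the kernel's atomic_inc_not_zero(): take a reference
 * only if the count has not already dropped to zero (i.e. the object is
 * not in the middle of being freed). */
static bool inc_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	while (old != 0)
		if (atomic_compare_exchange_weak(ref, &old, old + 1))
			return true;
	return false;
}

/* Shape of the patched mem_cgroup_charge_common(): page is never NULL;
 * allocate optimistically, publish under the lock, back out and retry
 * on a lost race. */
static struct page_cgroup *charge(struct page *page)
{
	struct page_cgroup *pc;

retry:
	pthread_mutex_lock(&page->lock);
	pc = page->pc;
	if (pc) {
		if (!inc_not_zero(&pc->ref_cnt)) {
			/* pc is being uncharged: drop the lock and retry */
			pthread_mutex_unlock(&page->lock);
			sched_yield();	/* stands in for cpu_relax() */
			goto retry;
		}
		pthread_mutex_unlock(&page->lock);
		return pc;	/* already charged: reference taken */
	}
	pthread_mutex_unlock(&page->lock);

	pc = calloc(1, sizeof(*pc));	/* kzalloc() analogue */
	if (!pc)
		return NULL;
	atomic_init(&pc->ref_cnt, 1);

	pthread_mutex_lock(&page->lock);
	if (page->pc) {
		/* another thread charged the page first: undo and retry */
		pthread_mutex_unlock(&page->lock);
		free(pc);
		goto retry;
	}
	page->pc = pc;	/* the open-coded page_assign_page_cgroup() */
	pthread_mutex_unlock(&page->lock);
	return pc;
}

int main(void)
{
	struct page page = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct page_cgroup *pc;

	charge(&page);		/* installs a fresh pc, ref_cnt 1 */
	pc = charge(&page);	/* finds it, takes a second reference */
	if (pc)
		printf("ref_cnt = %d\n", atomic_load(&pc->ref_cnt));
	free(page.pc);
	return 0;
}
```

The back-out path mirrors the patch: on a lost race the freshly allocated pc is freed and the sequence restarts at retry:, just as mem_cgroup_charge_common() now uncharges, kfree()s and retries rather than special-casing a NULL page. (Build with cc -pthread.)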