author	Hugh Dickins <hughd@google.com>	2011-03-14 01:08:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-14 08:29:50 -0700
commit	2fbfac4e053861925fa3fffcdc327649b09af54c (patch)
tree	36d9da87f5a42d61cd8be2fd02bab666cb8ac145 /mm
parent	c44ed965be7a84afaa07543c04eb97a5dfe93422 (diff)
thp+memcg-numa: fix BUG at include/linux/mm.h:370!
THP's collapse_huge_page() has an understandable but ugly difference in
when its huge page is allocated: inside if NUMA but outside if not.  It's
hardly surprising that the memcg failure path forgot that, freeing the
page in the non-NUMA case, then hitting a VM_BUG_ON in get_page() (or
even worse, using the freed page).

Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
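A minimal userspace model of the ownership rule this patch restores (hypothetical names, not the kernel code): a shared failure path may only free the page when the function itself allocated it; when the caller preallocated it, the error path must leave it alone.

	#include <stdio.h>
	#include <stdlib.h>

	/* Toggle to model CONFIG_NUMA: allocate inside collapse() vs. caller-owned. */
	#define MODEL_NUMA 0

	static int charge(void)
	{
		return -1;	/* pretend the memcg charge always fails */
	}

	static void collapse(char **bufp)
	{
	#if !MODEL_NUMA
		char *buf = *bufp;		/* caller owns this allocation */

		if (charge() < 0)
			return;			/* fixed path: do NOT free the caller's buffer */
	#else
		char *buf = malloc(64);		/* this function owns the allocation */

		if (!buf)
			return;
		if (charge() < 0) {
			free(buf);		/* we allocated it, so we free it */
			return;
		}
	#endif
		snprintf(buf, 64, "collapsed");
	}

	int main(void)
	{
		char *preallocated = malloc(64);

		if (!preallocated)
			return 1;
		collapse(&preallocated);
		/*
		 * Before the fix, the non-NUMA error path also freed the
		 * caller's buffer, so this later use acted on freed memory:
		 * the userspace analogue of hitting VM_BUG_ON in get_page().
		 */
		printf("caller's buffer is still usable: %p\n", (void *)preallocated);
		free(preallocated);
		return 0;
	}

The actual fix below does the same thing inside collapse_huge_page(): the mem_cgroup_newpage_charge() failure check is duplicated into each #if branch, so only the NUMA branch, which allocated new_page itself, calls put_page() on failure.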
Diffstat (limited to 'mm')
-rw-r--r--	mm/huge_memory.c	| 6
1 file changed, 5 insertions, 1 deletion
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index dbe99a5f2073..113e35c47502 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1762,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm,
#ifndef CONFIG_NUMA
VM_BUG_ON(!*hpage);
new_page = *hpage;
+ if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+ up_read(&mm->mmap_sem);
+ return;
+ }
#else
VM_BUG_ON(*hpage);
/*
@@ -1781,12 +1785,12 @@ static void collapse_huge_page(struct mm_struct *mm,
*hpage = ERR_PTR(-ENOMEM);
return;
}
-#endif
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
up_read(&mm->mmap_sem);
put_page(new_page);
return;
}
+#endif
/* after allocating the hugepage upgrade to mmap_sem write mode */
up_read(&mm->mmap_sem);