author	Xishi Qiu <qiuxishi@huawei.com>	2015-02-11 15:25:07 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 17:06:01 -0800
commit	23f086f962e67a1b8a508c0d8e86b7833c941564 (patch)
tree	e76cfd9b87fe025fde008bbc3d8520cc7184a96c /mm/page_alloc.c
parent	91fbdc0f89807bb97792ea6893717a8d3154b871 (diff)
kmemcheck: move hook into __alloc_pages_nodemask() for the page allocator
Currently kmemcheck_pagealloc_alloc() is called only from __alloc_pages_slowpath():

    __alloc_pages_nodemask()
        __alloc_pages_slowpath()
            kmemcheck_pagealloc_alloc()

so a page allocated on the following (fast) path is never tracked by kmemcheck:

    __alloc_pages_nodemask()
        get_page_from_freelist()

Move kmemcheck_pagealloc_alloc() into __alloc_pages_nodemask(), like this:

    __alloc_pages_nodemask()
        ...
        get_page_from_freelist()
        if (!page)
            __alloc_pages_slowpath()
        kmemcheck_pagealloc_alloc()
        ...

Signed-off-by: Xishi Qiu <qiuxishi@huawei.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Li Zefan <lizefan@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
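For context, the sketch below shows roughly where the hook ends up in __alloc_pages_nodemask() after this patch. It is a heavily simplified sketch, not the verbatim kernel source: internal call arguments are elided, cpuset retry and fastpath flag handling are omitted, and locals such as alloc_mask and migratetype are assumed to be set up earlier in the function as in the real code.

    struct page *
    __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                           struct zonelist *zonelist, nodemask_t *nodemask)
    {
            struct page *page;

            /* Fast path: take a page straight from the per-zone free lists. */
            page = get_page_from_freelist(/* ... */);
            if (!page)
                    /* Slow path: wake kswapd, reclaim, compact, possibly OOM. */
                    page = __alloc_pages_slowpath(/* ... */);

            /* The hook now covers allocations from both paths. */
            if (kmemcheck_enabled && page)
                    kmemcheck_pagealloc_alloc(page, order, gfp_mask);

            trace_mm_page_alloc(page, order, alloc_mask, migratetype);
            return page;
    }

Note the added NULL check in the moved hook: the old call site sat under the got_pg label in the slow path, which is reached only with a successfully allocated page, whereas at the new call site both paths may have failed and left page == NULL.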
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1c7d90f7a84a..a88cb0cbf352 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2842,11 +2842,7 @@ retry:
 nopage:
 	warn_alloc_failed(gfp_mask, order, NULL);
-	return page;
 got_pg:
-	if (kmemcheck_enabled)
-		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
 	return page;
 }
@@ -2916,6 +2912,9 @@ retry_cpuset:
 				preferred_zone, classzone_idx, migratetype);
 	}
 
+	if (kmemcheck_enabled && page)
+		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
+
 	trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out: