author		Wanpeng Li <liwanp@linux.vnet.ibm.com>		2013-09-30 13:45:21 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-30 14:31:02 -0700
commit		20cb6cab52a21b46e3c0dc7bd23f004f810fb421 (patch)
tree		a0adabe2b403a45cdb9dcf034a1f1a01f1468168 /mm/madvise.c
parent		080506ad0aa9c9fbc7879cdd290d55624da08c60 (diff)
download	lwn-20cb6cab52a21b46e3c0dc7bd23f004f810fb421.tar.gz
		lwn-20cb6cab52a21b46e3c0dc7bd23f004f810fb421.zip
mm/hwpoison: fix traversal of hugetlbfs pages to avoid printk flood
madvise_hwpoison does not check whether a page is a small page or a huge page, and unconditionally traverses the range in small-page granularity, which results in a printk flood of "MCE xxx: already hardware poisoned" messages when the page is a huge page.

This patch fixes it by using compound_order(compound_head(page)) as the iterator step for huge pages.

Testcase:

#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <errno.h>

#define PAGES_TO_TEST 3
#define PAGE_SIZE (4096 * 512)

int main(void)
{
	char *mem;
	int i;

	mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0, 0);

	if (madvise(mem, PAGES_TO_TEST * PAGE_SIZE, MADV_HWPOISON) == -1)
		return -1;

	munmap(mem, PAGES_TO_TEST * PAGE_SIZE);

	return 0;
}

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
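To see the effect of the new stride, here is a minimal userspace sketch (not kernel code). The 4KB base page size and the order-9 (2MB) hugepage are assumptions chosen to match the testcase's PAGE_SIZE of 4096 * 512; it only demonstrates the arithmetic PAGE_SIZE << compound_order(), not the kernel interfaces themselves.

#include <stdio.h>

int main(void)
{
	unsigned long base_page = 4096;	/* assumed 4KB base page */
	unsigned int order = 9;		/* assumed order of a 2MB hugepage */
	/* 3 hugepages, as in the testcase above */
	unsigned long range = 3UL * (base_page << order);

	/* old loop: one pass (and one "already hardware poisoned"
	 * printk after the first) per 4KB page */
	unsigned long small_steps = range / base_page;
	/* patched loop: stride grows to PAGE_SIZE << compound_order(),
	 * so one pass per hugepage */
	unsigned long huge_steps = range / (base_page << order);

	printf("iterations at 4KB granularity:   %lu\n", small_steps); /* 1536 */
	printf("iterations at hugepage stride:   %lu\n", huge_steps);  /* 3 */
	return 0;
}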
Diffstat (limited to 'mm/madvise.c')
-rw-r--r--	mm/madvise.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/madvise.c b/mm/madvise.c
index 6975bc812542..539eeb96b323 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -343,10 +343,11 @@ static long madvise_remove(struct vm_area_struct *vma,
  */
 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
 {
+	struct page *p;
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	for (; start < end; start += PAGE_SIZE) {
-		struct page *p;
+	for (; start < end; start += PAGE_SIZE <<
+				compound_order(compound_head(p))) {
 		int ret;
 		ret = get_user_pages_fast(start, 1, 0, &p);