author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2015-07-06 23:18:37 +0300
committer	Jiri Slaby <jslaby@suse.cz>	2015-08-19 08:36:27 +0200
commit		bf653833fc72668cadcb1f54310f3d0bd6b36393
tree		fabe83c3a78daf38ee430aa3b721b7530c67039c
parent		c707649e400a18e0b2fa96e533270673cfa4687f
mm: avoid setting up anonymous pages into file mapping
commit 6b7339f4c31ad69c8e9c0b2859276e22cf72176d upstream.
Reading the page fault handler code, I've noticed that under the right
circumstances the kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), the kernel would handle a page fault on a not-yet-populated
pte with do_anonymous_page().
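
The pre-fix dispatch in handle_pte_fault() (visible in the removed lines
of the diff below) only checked for vm_ops->fault after entering the
vm_ops branch, so a file-backed VMA whose driver set vm_ops without a
->fault handler fell through to do_anonymous_page():

	/* Old dispatch: a VMA with vm_ops but no ->fault handler
	 * falls through to do_anonymous_page(), even though it is
	 * backed by a file. */
	if (pte_none(entry)) {
		if (vma->vm_ops) {
			if (likely(vma->vm_ops->fault))
				return do_linear_fault(mm, vma, address,
						pte, pmd, flags, entry);
		}
		return do_anonymous_page(mm, vma, address,
					 pte, pmd, flags);
	}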
Let's change the page fault handler to use do_anonymous_page() only on
anonymous VMAs (->vm_ops == NULL) and make sure that the VMA is not
shared.
For a file mapping without vm_ops->fault(), or a shared VMA without
vm_ops, a page fault on a pte_none() entry will now lead to SIGBUS.
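
To illustrate the user-visible effect, here is a minimal, hypothetical
userspace sketch; it assumes a device /dev/example (not a real device)
whose driver implements ->mmap() with vm_ops set but no vm_ops->fault,
and which does not pre-populate every pte. With this fix applied,
touching an unpopulated page raises SIGBUS instead of quietly inserting
an anonymous page into the shared file mapping:

	#include <fcntl.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void on_sigbus(int sig)
	{
		/* After the fix, the fault on the unpopulated pte
		 * delivers SIGBUS instead of mapping an anon page. */
		write(STDOUT_FILENO, "SIGBUS\n", 7);
		_exit(0);
	}

	int main(void)
	{
		signal(SIGBUS, on_sigbus);

		/* Hypothetical device whose ->mmap() lacks
		 * vm_ops->fault. */
		int fd = open("/dev/example", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Before the fix this could silently place an
		 * anonymous page into the file mapping. */
		p[0] = 1;
		puts("no fault (pte was populated by the driver)");
		return 0;
	}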
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Willy Tarreau <w@1wt.eu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
-rw-r--r--	mm/memory.c	13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 38617f049b9f..d0d84c36cd5c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3213,6 +3213,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	pte_unmap(page_table);
 
+	/* File mapping without ->vm_ops ? */
+	if (vma->vm_flags & VM_SHARED)
+		return VM_FAULT_SIGBUS;
+
 	/* Check if we need to add a guard page to the stack */
 	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGSEGV;
@@ -3480,6 +3484,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
 	pte_unmap(page_table);
+	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
 	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
@@ -3691,11 +3698,9 @@ static int handle_pte_fault(struct mm_struct *mm,
 	entry = ACCESS_ONCE(*pte);
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops) {
-				if (likely(vma->vm_ops->fault))
-					return do_linear_fault(mm, vma, address,
+			if (vma->vm_ops)
+				return do_linear_fault(mm, vma, address,
 						pte, pmd, flags, entry);
-			}
 			return do_anonymous_page(mm, vma, address,
 						 pte, pmd, flags);
 		}