| | | |
|---|---|---|
| author | Hugh Dickins <hughd@google.com> | 2012-10-08 16:29:07 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 16:22:20 +0900 |
| commit | 6597d783397aebb793fb529474cce5089aa4c67f | |
| tree | 059d7e4c18c8cec61a71a6c15994867dceb7b93a /mm/mmap.c | |
| parent | d741c9cdeee6a569dae0dbbaf028065402955b59 | |
mm/mmap.c: replace find_vma_prepare() with clearer find_vma_links()
People get confused by find_vma_prepare() because it does not define what it leaves in its output arguments in the cases where its callers will not use them.

Clarify by passing in the end-of-range address too, and returning failure if any existing vma overlaps the new range, instead of returning an ambiguous vma which most callers must then check. find_vma_links() is a clearer name.
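To make the change in calling convention concrete, here is a minimal userspace sketch, not the kernel code: a plain sorted array stands in for the mm rbtree, and range_prepare()/range_links() are hypothetical stand-ins for find_vma_prepare()/find_vma_links(). The point is the semantics: the old helper hands back a candidate range that every caller must re-check for overlap itself, while the new helper also takes the end of the range and answers the question directly with 0 or -ENOMEM.

```c
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for a vma: a half-open address range [start, end). */
struct range { unsigned long start, end; };

/* Existing "mappings", kept sorted by start (models the mm rbtree). */
static const struct range maps[] = {
	{ 0x1000, 0x3000 },
	{ 0x8000, 0x9000 },
};
#define NMAPS (sizeof(maps) / sizeof(maps[0]))

/*
 * Old style (find_vma_prepare-like): return the first range whose end is
 * above addr; the caller must still test it for overlap with the new range.
 */
static const struct range *range_prepare(unsigned long addr)
{
	for (size_t i = 0; i < NMAPS; i++)
		if (maps[i].end > addr)
			return &maps[i];
	return NULL;
}

/*
 * New style (find_vma_links-like): take the end of the new range too and
 * answer directly: 0 if nothing overlaps, -ENOMEM otherwise.
 */
static int range_links(unsigned long addr, unsigned long end)
{
	for (size_t i = 0; i < NMAPS; i++) {
		if (maps[i].end > addr) {
			if (maps[i].start < end)
				return -ENOMEM;	/* overlaps an existing range */
			break;			/* sorted: later ranges cannot overlap */
		}
	}
	return 0;
}

int main(void)
{
	unsigned long addr = 0x2000, len = 0x1000;

	/* Old convention: the caller performs the overlap test itself. */
	const struct range *r = range_prepare(addr);
	printf("prepare: %s\n",
	       r && r->start < addr + len ? "overlap" : "free");

	/* New convention: the helper reports overlap as an error. */
	printf("links:   %s\n",
	       range_links(addr, addr + len) ? "overlap" : "free");
	return 0;
}
```

Under the old convention, a caller that forgot the vm_start < addr + len test would silently accept an overlapping range; under the new one the overlap test lives in exactly one place.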
This does revert 2.6.27's dfe195fb79e88 ("mm: fix uninitialized variables
for find_vma_prepare callers"), but it looks like gcc 4.3.0 was one of
those releases too eager to shout about uninitialized variables: only
copy_vma() still warns with 4.5.1 and 4.7.1, and a BUG() on error silences that.
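As background on the warning the reverted commit was papering over, here is a toy sketch of the pattern (again not the kernel code; the names are made up): an output argument that is written only on success and read only after a success check. In the real find_vma_prepare() callers, some gcc releases could not see that the two conditions coincide and reported the variable as possibly uninitialized.

```c
#include <stdio.h>

/*
 * Toy illustration of a "maybe uninitialized" false positive (made-up code,
 * not the kernel's): *out is written only when lookup() succeeds.
 */
static int lookup(int key, int *out)
{
	if (key < 0)
		return -1;	/* failure: *out deliberately left untouched */
	*out = key * 2;
	return 0;
}

int main(void)
{
	int value;		/* intentionally not initialized */

	if (lookup(21, &value))
		return 1;	/* error path never reads value */

	/*
	 * Only reached on success, so value is defined here; an analysis
	 * that cannot connect the return code to the store may still warn.
	 */
	printf("%d\n", value);
	return 0;
}
```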
[hughd@google.com: fix warning, remove BUG()]
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 45
1 file changed, 21 insertions(+), 24 deletions(-)
```diff
diff --git a/mm/mmap.c b/mm/mmap.c
index a76042dc806d..2ba27434e843 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -353,17 +353,14 @@ void validate_mm(struct mm_struct *mm)
 #define validate_mm(mm) do { } while (0)
 #endif
 
-static struct vm_area_struct *
-find_vma_prepare(struct mm_struct *mm, unsigned long addr,
-                struct vm_area_struct **pprev, struct rb_node ***rb_link,
-                struct rb_node ** rb_parent)
+static int find_vma_links(struct mm_struct *mm, unsigned long addr,
+                unsigned long end, struct vm_area_struct **pprev,
+                struct rb_node ***rb_link, struct rb_node **rb_parent)
 {
-        struct vm_area_struct * vma;
-        struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
+        struct rb_node **__rb_link, *__rb_parent, *rb_prev;
 
         __rb_link = &mm->mm_rb.rb_node;
         rb_prev = __rb_parent = NULL;
-        vma = NULL;
 
         while (*__rb_link) {
                 struct vm_area_struct *vma_tmp;
@@ -372,9 +369,9 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
 
                 if (vma_tmp->vm_end > addr) {
-                        vma = vma_tmp;
-                        if (vma_tmp->vm_start <= addr)
-                                break;
+                        /* Fail if an existing vma overlaps the area */
+                        if (vma_tmp->vm_start < end)
+                                return -ENOMEM;
                         __rb_link = &__rb_parent->rb_left;
                 } else {
                         rb_prev = __rb_parent;
@@ -387,7 +384,7 @@ find_vma_prepare(struct mm_struct *mm, unsigned long addr,
                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
         *rb_link = __rb_link;
         *rb_parent = __rb_parent;
-        return vma;
+        return 0;
 }
 
 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -456,11 +453,12 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-        struct vm_area_struct *__vma, *prev;
+        struct vm_area_struct *prev;
         struct rb_node **rb_link, *rb_parent;
 
-        __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
-        BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                           &prev, &rb_link, &rb_parent))
+                BUG();
         __vma_link(mm, vma, prev, rb_link, rb_parent);
         mm->map_count++;
 }
@@ -1221,8 +1219,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         /* Clear old maps */
         error = -ENOMEM;
 munmap_back:
-        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-        if (vma && vma->vm_start < addr + len) {
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                 if (do_munmap(mm, addr, len))
                         return -ENOMEM;
                 goto munmap_back;
@@ -2183,8 +2180,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
          * Clear old maps.  this also does some error checking for us
          */
 munmap_back:
-        vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
-        if (vma && vma->vm_start < addr + len) {
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
                 if (do_munmap(mm, addr, len))
                         return -ENOMEM;
                 goto munmap_back;
@@ -2298,10 +2294,10 @@ void exit_mmap(struct mm_struct *mm)
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
  * then i_mmap_mutex is taken here.
  */
-int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-        struct vm_area_struct * __vma, * prev;
-        struct rb_node ** rb_link, * rb_parent;
+        struct vm_area_struct *prev;
+        struct rb_node **rb_link, *rb_parent;
 
         /*
          * The vm_pgoff of a purely anonymous vma should be irrelevant
@@ -2319,8 +2315,8 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
                 BUG_ON(vma->anon_vma);
                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
         }
-        __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
-        if (__vma && __vma->vm_start < vma->vm_end)
+        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
+                           &prev, &rb_link, &rb_parent))
                 return -ENOMEM;
         if ((vma->vm_flags & VM_ACCOUNT) &&
              security_vm_enough_memory_mm(mm, vma_pages(vma)))
@@ -2354,7 +2350,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                 faulted_in_anon_vma = false;
         }
 
-        find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
+        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+                return NULL;    /* should never get here */
         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
         if (new_vma) {
```