author		David S. Miller <davem@davemloft.net>	2020-07-25 17:49:04 -0700
committer	David S. Miller <davem@davemloft.net>	2020-07-25 17:49:04 -0700
commit		a57066b1a01977a646145f4ce8dfb4538b08368a (patch)
tree		57c2b4fa2fc48e687a1820b9bf4ef4f4363be0f9 /mm/mmap.c
parent		dfecd3e00cd32b2a6d1cfdb30b513dd42575ada3 (diff)
parent		04300d66f0a06d572d9f2ad6768c38cabde22179 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
The UDP reuseport conflict was a little bit tricky.

The net-next code, via bpf-next, extracted the reuseport handling into a helper so that the BPF sk lookup code could invoke it.

At the same time, the logic for reuseport handling of unconnected sockets changed via commit efc6b6f6c3113e8b203b9debfb72d81e0f3dcace, which changed the logic to carry the reuseport result on into the rest of the lookup loop if we do not return immediately. This requires moving the reuseport_has_conns() logic into the callers.

While we are here, get rid of inline directives as they do not belong in foo.c files.

The other changes were cases of more straightforward overlapping modifications.

Signed-off-by: David S. Miller <davem@davemloft.net>
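For context, the resolved lookup path ends up shaped roughly like the sketch below: the score loop keeps the reuseport result and carries on scoring, rather than returning the moment a reuseport group matches. This is a paraphrase of the post-merge udp4_lib_lookup2() in net/ipv4/udp.c, not the verbatim code; RCU, statistics, and the IPv6 twin are omitted.

/*
 * Simplified sketch of the post-merge udp4_lib_lookup2() shape
 * (paraphrased; details trimmed).
 */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif);
		if (score > badness) {
			/* Helper extracted by bpf-next for BPF sk lookup. */
			result = lookup_reuseport(net, sk, skb,
						  saddr, sport, daddr, hnum);
			/*
			 * The reuseport_has_conns() check now lives here in
			 * the caller: if the group has connected sockets,
			 * fall back to scoring instead of returning the
			 * reuseport pick outright.
			 */
			if (result && !reuseport_has_conns(sk, false))
				return result;

			result = result ? : sk;
			badness = score;
		}
	}
	return result;
}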
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 59a4682ebf3f..8c7ca737a19b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2620,7 +2620,7 @@ static void unmap_region(struct mm_struct *mm,
  * Create a list of vma's touched by the unmap, removing them from the mm's
  * vma list as we go..
  */
-static void
+static bool
 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_area_struct *prev, unsigned long end)
 {
@@ -2645,6 +2645,17 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Kill the cache */
 	vmacache_invalidate(mm);
+
+	/*
+	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+	 * VM_GROWSUP VMA. Such VMAs can change their size under
+	 * down_read(mmap_lock) and collide with the VMA we are about to unmap.
+	 */
+	if (vma && (vma->vm_flags & VM_GROWSDOWN))
+		return false;
+	if (prev && (prev->vm_flags & VM_GROWSUP))
+		return false;
+	return true;
 }
 
 /*
@@ -2825,7 +2836,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	}
 
 	/* Detach vmas from rbtree */
-	detach_vmas_to_be_unmapped(mm, vma, prev, end);
+	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
+		downgrade = false;
 
 	if (downgrade)
 		mmap_write_downgrade(mm);
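
The new bool return feeds the existing downgrade protocol of __do_munmap(): a return value of 1 tells the caller that mmap_lock was downgraded to read mode and must be dropped with mmap_read_unlock() rather than mmap_write_unlock(). Roughly how the 5.8-era caller consumes it (paraphrased sketch; error paths trimmed):

/* Paraphrased __vm_munmap() caller, illustrating the downgrade contract. */
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
{
	struct mm_struct *mm = current->mm;
	LIST_HEAD(uf);
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __do_munmap(mm, start, len, &uf, downgrade);
	/*
	 * A return of 1 means the lock was downgraded to read mode;
	 * anything else (0 or -errno) leaves it held for write. 1 is
	 * not a legal munmap() return value, so reset it to 0.
	 */
	if (ret == 1) {
		mmap_read_unlock(mm);
		ret = 0;
	} else {
		mmap_write_unlock(mm);
	}

	userfaultfd_unmap_complete(mm, &uf);
	return ret;
}

With the change above, detach_vmas_to_be_unmapped() returning false simply forces downgrade off, so the unmap proceeds under the write lock whenever a neighbouring VM_GROWSDOWN/VM_GROWSUP VMA could expand into the region being torn down.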