author		Linus Torvalds <torvalds@g5.osdl.org>	2005-12-16 10:21:23 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-16 10:21:23 -0800
commit		4d7672b46244abffea1953e55688c0ea143dd617 (patch)
tree		9f3bdf438bcb0d5f6e723665ced23308fffb8368
parent		281ab031a8c9e5b593142eb4ec59a87faae8676a (diff)
Make sure we copy pages inserted with "vm_insert_page()" on fork
The logic that decides that a fork() might be able to avoid copying a VM
area when it can be re-created by page faults didn't know about the new
vm_insert_page() case.

Also make some things a bit more anal wrt VM_PFNMAP.

Pointed out by Hugh Dickins <hugh@veritas.com>

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
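For context, a minimal sketch (not part of this commit) of how a driver ends up with such a vma. It assumes a hypothetical mydrv driver whose mmap handler maps its own refcounted kernel pages with vm_insert_page(); after this change, that call also tags the vma VM_INSERTPAGE so fork() knows it must copy the page tables:

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv *drv = file->private_data;	/* hypothetical driver state */
	unsigned long addr;
	int i = 0, err;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		/* drv->pages[] are ordinary, refcounted struct pages
		 * (hypothetical; e.g. from alloc_page()) */
		err = vm_insert_page(vma, addr, drv->pages[i++]);
		if (err)
			return err;	/* -EFAULT or -EINVAL from the sanity checks */
	}
	/* vm_insert_page() has now set VM_INSERTPAGE on this vma */
	return 0;
}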
-rw-r--r--	include/linux/mm.h	1
-rw-r--r--	mm/memory.c		3
-rw-r--r--	mm/mmap.c		2
-rw-r--r--	mm/mremap.c		2
4 files changed, 5 insertions, 3 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e5677f456742..a06a84d347fb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,6 +163,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
index d22f78c8a381..d8dde07a3656 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -574,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1228,6 +1228,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
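To see what the copy_page_range() change buys, a userspace sketch against the hypothetical /dev/mydrv above. Such a vma has no anon_vma, so before this patch fork() skipped copying its page tables on the assumption that faults could re-create the pages; for vm_insert_page() mappings there is typically no nopage handler to bring them back, so the child would get fresh pages instead of the driver's. With VM_INSERTPAGE the tables are copied and both processes see the same data:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/wait.h>

int main(void)
{
	int fd = open("/dev/mydrv", O_RDWR);	/* hypothetical device */
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");
	if (fork() == 0) {
		/* With VM_INSERTPAGE the page tables were copied at
		 * fork(), so the child reads "hello" here. */
		printf("child sees: %s\n", p);
		_exit(0);
	}
	wait(NULL);
	return 0;
}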
diff --git a/mm/mmap.c b/mm/mmap.c
index 11ca5927d5ff..64ba4dbcb7de 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -611,7 +611,7 @@ again: remove_next = 1 + (end > next->vm_end);
  * If the vma has a ->close operation then the driver probably needs to release
  * per-vma resources, so we don't attempt to merge those.
  */
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 			struct file *file, unsigned long vm_flags)
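The VM_SPECIAL change keeps pfn-mapped vmas from ever being merged with a neighbour. For contrast with vm_insert_page(), a sketch of how such a vma arises, again with a hypothetical driver (mydrv_phys_base is assumed): remap_pfn_range() marks the vma VM_PFNMAP because it maps raw page frames with no struct page behind them, which is exactly why fork(), vma merging, and (below) mremap() have to treat it specially:

static int mydrv_mmap_pfn(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = mydrv_phys_base >> PAGE_SHIFT;	/* hypothetical */

	/* Sets VM_PFNMAP (among other flags) on the vma: there are no
	 * struct pages here for fork() to refcount or copy safely. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}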
diff --git a/mm/mremap.c b/mm/mremap.c
index b535438c363c..ddaeee9a0b69 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -323,7 +323,7 @@ unsigned long do_mremap(unsigned long addr,
 	/* We can't remap across vm area boundaries */
 	if (old_len > vma->vm_end - addr)
 		goto out;
-	if (vma->vm_flags & VM_DONTEXPAND) {
+	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
 		if (new_len > old_len)
 			goto out;
 	}
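Finally, the mremap.c hunk means growing a pfn-mapped region in place now takes do_mremap()'s error path, just as VM_DONTEXPAND mappings always have. A userspace sketch against the hypothetical pfn-mapping device above:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/dev/mydrv", O_RDWR);	/* hypothetical device */
	void *p, *q;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* Growing a VM_PFNMAP mapping is now rejected by do_mremap() */
	q = mremap(p, 4096, 8192, 0);
	if (q == MAP_FAILED)
		perror("mremap");	/* expected to fail after this patch */
	return 0;
}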