Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 58
1 file changed, 40 insertions, 18 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 6bf2b471e30c..7663068a33c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1155,12 +1155,6 @@ again:
 
 			if (!PageAnon(page)) {
 				if (pte_dirty(ptent)) {
-					/*
-					 * oom_reaper cannot tear down dirty
-					 * pages
-					 */
-					if (unlikely(details && details->ignore_dirty))
-						continue;
 					force_flush = 1;
 					set_page_dirty(page);
 				}
@@ -1179,8 +1173,8 @@ again:
 			}
 			continue;
 		}
-		/* only check swap_entries if explicitly asked for in details */
-		if (unlikely(details && !details->check_swap_entries))
+		/* If details->check_mapping, we leave swap entries. */
+		if (unlikely(details))
 			continue;
 
 		entry = pte_to_swp_entry(ptent);
@@ -1376,12 +1370,11 @@ void unmap_vmas(struct mmu_gather *tlb,
  * @vma: vm_area_struct holding the applicable pages
  * @start: starting address of pages to zap
  * @size: number of bytes to zap
- * @details: details of shared cache invalidation
  *
  * Caller must protect the VMA list
  */
 void zap_page_range(struct vm_area_struct *vma, unsigned long start,
-		unsigned long size, struct zap_details *details)
+		unsigned long size)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
@@ -1392,7 +1385,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
-		unmap_single_vma(&tlb, vma, start, end, details);
+		unmap_single_vma(&tlb, vma, start, end, NULL);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	tlb_finish_mmu(&tlb, start, end);
 }
@@ -3471,12 +3464,10 @@ out:
 
 static int create_huge_pmd(struct vm_fault *vmf)
 {
-	struct vm_area_struct *vma = vmf->vma;
-	if (vma_is_anonymous(vma))
+	if (vma_is_anonymous(vmf->vma))
 		return do_huge_pmd_anonymous_page(vmf);
-	if (vma->vm_ops->pmd_fault)
-		return vma->vm_ops->pmd_fault(vma, vmf->address, vmf->pmd,
-				vmf->flags);
+	if (vmf->vma->vm_ops->pmd_fault)
+		return vmf->vma->vm_ops->pmd_fault(vmf);
 	return VM_FAULT_FALLBACK;
 }
 
@@ -3485,8 +3476,7 @@ static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
 	if (vma_is_anonymous(vmf->vma))
 		return do_huge_pmd_wp_page(vmf, orig_pmd);
 	if (vmf->vma->vm_ops->pmd_fault)
-		return vmf->vma->vm_ops->pmd_fault(vmf->vma, vmf->address,
-						vmf->pmd, vmf->flags);
+		return vmf->vma->vm_ops->pmd_fault(vmf);
 
 	/* COW handled on pte level: split pmd */
 	VM_BUG_ON_VMA(vmf->vma->vm_flags & VM_SHARED, vmf->vma);
@@ -4155,6 +4145,38 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
 	}
 }
+
+long copy_huge_page_from_user(struct page *dst_page,
+				const void __user *usr_src,
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault)
+{
+	void *src = (void *)usr_src;
+	void *page_kaddr;
+	unsigned long i, rc = 0;
+	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
+
+	for (i = 0; i < pages_per_huge_page; i++) {
+		if (allow_pagefault)
+			page_kaddr = kmap(dst_page + i);
+		else
+			page_kaddr = kmap_atomic(dst_page + i);
+		rc = copy_from_user(page_kaddr,
+				(const void __user *)(src + i * PAGE_SIZE),
+				PAGE_SIZE);
+		if (allow_pagefault)
+			kunmap(dst_page + i);
+		else
+			kunmap_atomic(page_kaddr);
+
+		ret_val -= (PAGE_SIZE - rc);
+		if (rc)
+			break;
+
+		cond_resched();
+	}
+	return ret_val;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
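
For reference, the new copy_huge_page_from_user() added above follows the copy_from_user() convention of returning the number of bytes left uncopied. The userspace-style sketch below only mirrors that byte accounting so the return value is easy to check; fake_copy(), PAGES_PER_HUGE_PAGE and the fail_after cutoff are hypothetical stand-ins for illustration and are not part of the kernel change.

/*
 * Illustration only: reproduces the accounting of copy_huge_page_from_user().
 * fake_copy() stands in for copy_from_user() and, like it, returns the number
 * of bytes it failed to copy.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGES_PER_HUGE_PAGE	512UL	/* e.g. a 2 MB huge page */

/* Pretend the source becomes unreadable after fail_after bytes. */
static unsigned long fake_copy(unsigned long want, unsigned long *copied,
			       unsigned long fail_after)
{
	unsigned long can = fail_after > *copied ? fail_after - *copied : 0;
	unsigned long done = can < want ? can : want;

	*copied += done;
	return want - done;		/* bytes left uncopied */
}

int main(void)
{
	unsigned long i, rc, copied = 0;
	unsigned long ret_val = PAGES_PER_HUGE_PAGE * PAGE_SIZE;

	for (i = 0; i < PAGES_PER_HUGE_PAGE; i++) {
		rc = fake_copy(PAGE_SIZE, &copied, 5 * PAGE_SIZE + 100);
		ret_val -= (PAGE_SIZE - rc);	/* subtract what was copied */
		if (rc)
			break;			/* partial copy: stop early */
	}
	/* Prints the bytes that were NOT copied, copy_from_user() style. */
	printf("uncopied: %lu of %lu bytes\n", ret_val,
	       PAGES_PER_HUGE_PAGE * PAGE_SIZE);
	return 0;
}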