author    Yin Fengwei <fengwei.yin@intel.com>  2023-08-02 16:14:04 +0100
committer Andrew Morton <akpm@linux-foundation.org>  2023-08-24 16:20:26 -0700
commit    3bd786f76de2e01745f462844fd1a206052ee8b8 (patch)
tree      235d513d3ad2d939fd00280fed415ecbac2eec1b /mm/memory.c
parent    86f35f69db8e7d169c36472a349507ab0a461f49 (diff)
download  lwn-3bd786f76de2e01745f462844fd1a206052ee8b8.tar.gz lwn-3bd786f76de2e01745f462844fd1a206052ee8b8.zip
mm: convert do_set_pte() to set_pte_range()
set_pte_range() allows setting up page table entries for a specific
range.  It takes advantage of batched rmap updates for large folios.
It now takes care of calling update_mmu_cache_range().
Link: https://lkml.kernel.org/r/20230802151406.3735276-37-willy@infradead.org
Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
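
For orientation, here is a hedged sketch of how a fault handler could use the new helper to map every sub-page of a file-backed large folio in one call. It is illustrative kernel-style C, not part of this commit; the function name and the start_addr parameter are hypothetical, and vmf->pte is assumed to already point at the first PTE of the range, as in finish_fault().

/*
 * Hypothetical caller sketch (kernel context assumed; not part of this
 * commit).  'start_addr' is a suitably aligned user address at which the
 * whole folio should be mapped.
 */
static void map_whole_folio_sketch(struct vm_fault *vmf, struct folio *folio,
		unsigned long start_addr)
{
	/* One call covers every sub-page: rmap accounting, icache flushing
	 * and the MMU-cache update are all batched inside set_pte_range(). */
	set_pte_range(vmf, folio, folio_page(folio, 0),
		      folio_nr_pages(folio), start_addr);
}

Note that in this commit the anonymous path still asserts nr == 1 (the VM_BUG_ON_FOLIO in the diff below), so nr > 1 is only meaningful for the file-backed branch; finish_fault(), as converted below, still passes nr == 1, and batching with larger ranges is left to the file-mapping fault path elsewhere in this series.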
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 37 ++++++++++++++++++++++++-------------
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index fbb7f066bfb6..12b385eaf353 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4330,15 +4330,24 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 }
 #endif
 
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
+/**
+ * set_pte_range - Set a range of PTEs to point to pages in a folio.
+ * @vmf: Fault decription.
+ * @folio: The folio that contains @page.
+ * @page: The first page to create a PTE for.
+ * @nr: The number of PTEs to create.
+ * @addr: The first address to create a PTE for.
+ */
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool prefault = vmf->address != addr;
+	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
 
-	flush_icache_page(vma, page);
+	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
 
 	if (prefault && arch_wants_old_prefaulted_pte())
@@ -4352,14 +4361,18 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-		page_add_new_anon_rmap(page, vma, addr);
-		lru_cache_add_inactive_or_unevictable(page, vma);
+		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		VM_BUG_ON_FOLIO(nr != 1, folio);
+		folio_add_new_anon_rmap(folio, vma, addr);
+		folio_add_lru_vma(folio, vma);
 	} else {
-		inc_mm_counter(vma->vm_mm, mm_counter_file(page));
-		page_add_file_rmap(page, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
+		folio_add_file_rmap_range(folio, page, nr, vma, false);
 	}
-	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
+	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
+
+	/* no need to invalidate: a not-present page won't be cached */
+	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4427,11 +4440,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
-		do_set_pte(vmf, page, vmf->address);
-
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, vmf->address, vmf->pte);
+		struct folio *folio = page_folio(page);
 
+		set_pte_range(vmf, folio, page, 1, vmf->address);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
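
For readers unfamiliar with the batched helper used above, the sketch below shows roughly what the generic set_ptes() fallback does (it was added earlier in this patch series; architectures may override it). This is a simplified illustration, not the exact upstream code: the real fallback uses set_pte(), calls page_table_check_ptes_set(), and wraps the loop in arch_enter/leave_lazy_mmu_mode().

/*
 * Simplified sketch of the generic set_ptes() fallback from
 * <linux/pgtable.h>; kernel context assumed, details elided.
 */
static inline void set_ptes_sketch(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte_at(mm, addr, ptep, pte);	/* install one entry */
		if (--nr == 0)
			break;
		ptep++;					/* next PTE slot */
		addr += PAGE_SIZE;			/* next user address */
		/* advance the PTE value to the next page frame */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}

Because the nr consecutive PTEs map consecutive pages of the same folio, one call can replace a loop of set_pte_at() calls, which is what lets set_pte_range() batch the rmap, icache and MMU-cache work per folio rather than per page.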