| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2021-03-12 23:46:45 -0500 |
|---|---|---|
| committer | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-01-04 13:15:34 -0500 |
| commit | 820b05e92bdf07de94bc52c17d935d9ca0a481b3 (patch) | |
| tree | 9025ad685cf35dc2b0269e9fcb49c0ab69d09880 /mm/filemap.c | |
| parent | 9184a307768bf66af1f67d903d7b00725b7a6e8c (diff) | |
filemap: Use a folio in filemap_map_pages
Saves 61 bytes due to fewer calls to compound_head().
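The saving comes from the shape of the page-flag helpers: every struct page operation must first call compound_head() to resolve a possible tail page, while a struct folio is never a tail page, so that lookup disappears. As a rough sketch of the difference, assuming kernel context and using hypothetical demo_* helper names rather than the kernel's real macro machinery:

	/* A page helper must resolve a possible tail page first. */
	static inline bool demo_page_uptodate(struct page *page)
	{
		return test_bit(PG_uptodate, &compound_head(page)->flags);
	}

	/* A folio helper already holds the head page, so no lookup. */
	static inline bool demo_folio_uptodate(struct folio *folio)
	{
		return test_bit(PG_uptodate, &folio->flags);
	}

Every call site converted from the page variant to the folio variant drops one compound_head() call, which is where the text saving comes from.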
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 27 |
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 14019070c98b..f595563057c3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3227,7 +3227,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	return false;
 }
 
-static struct page *next_uptodate_page(struct folio *folio,
+static struct folio *next_uptodate_page(struct folio *folio,
 				       struct address_space *mapping,
 				       struct xa_state *xas, pgoff_t end_pgoff)
 {
@@ -3258,7 +3258,7 @@ static struct page *next_uptodate_page(struct folio *folio,
 		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
 		if (xas->xa_index >= max_idx)
 			goto unlock;
-		return &folio->page;
+		return folio;
 unlock:
 		folio_unlock(folio);
 skip:
@@ -3268,7 +3268,7 @@ skip:
 	return NULL;
 }
 
-static inline struct page *first_map_page(struct address_space *mapping,
+static inline struct folio *first_map_page(struct address_space *mapping,
 					  struct xa_state *xas,
 					  pgoff_t end_pgoff)
 {
@@ -3276,7 +3276,7 @@ static inline struct page *first_map_page(struct address_space *mapping,
 			  mapping, xas, end_pgoff);
 }
 
-static inline struct page *next_map_page(struct address_space *mapping,
+static inline struct folio *next_map_page(struct address_space *mapping,
 					 struct xa_state *xas,
 					 pgoff_t end_pgoff)
 {
@@ -3293,16 +3293,17 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *head, *page;
+	struct folio *folio;
+	struct page *page;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
 
 	rcu_read_lock();
-	head = first_map_page(mapping, &xas, end_pgoff);
-	if (!head)
+	folio = first_map_page(mapping, &xas, end_pgoff);
+	if (!folio)
 		goto out;
 
-	if (filemap_map_pmd(vmf, head)) {
+	if (filemap_map_pmd(vmf, &folio->page)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3310,7 +3311,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
 	do {
-		page = find_subpage(head, xas.xa_index);
+		page = folio_file_page(folio, xas.xa_index);
 		if (PageHWPoison(page))
 			goto unlock;
 
@@ -3331,12 +3332,12 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		do_set_pte(vmf, page, addr);
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, addr, vmf->pte);
-		unlock_page(head);
+		folio_unlock(folio);
 		continue;
 unlock:
-		unlock_page(head);
-		put_page(head);
-	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+		folio_unlock(folio);
+		folio_put(folio);
+	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();