author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-16 19:29:59 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:21 -0800
commit		5b4bd90f9ac76136c7148684b12276d4ae2d64a2 (patch)
tree		dba20cc221190f830ba7e5bb8c5a74126cdb9a34 /mm
parent		e0650a41f7d024b72669a2a2db846ef70281abd8 (diff)
rmap: add folio parameter to __page_set_anon_rmap()
Avoid the compound_head() call in PageAnon() by passing in the folio that
all callers have. This also saves me from wondering whether page->mapping
can ever be overwritten on a tail page (I don't think it can, but I'm not
100% sure).
Link: https://lkml.kernel.org/r/20230116192959.2147032-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
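
[Editor's note: a minimal sketch of why the folio variant is cheaper,
simplified from the kernel's page-flags helpers; the sketch_* names are
hypothetical, and kernel-internal types (struct folio, struct page,
page_folio(), PAGE_MAPPING_ANON) are assumed to be in scope.]

	/* A folio is never a tail page, so folio->mapping is tested directly. */
	static inline bool sketch_folio_test_anon(const struct folio *folio)
	{
		return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
	}

	/*
	 * The page variant must first resolve a possible tail page to its
	 * head folio; this page_folio()/compound_head() lookup is the work
	 * the patch avoids when the caller already holds the folio.
	 */
	static inline bool sketch_PageAnon(const struct page *page)
	{
		return sketch_folio_test_anon(page_folio(page));
	}
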
Diffstat (limited to 'mm')
 mm/rmap.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 0b5abdda1e6b..43760d622040 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1131,19 +1131,20 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page or Hugepage to add to rmap
+ * @folio:	Folio which contains page.
+ * @page:	Page to add to rmap.
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
  */
-static void __page_set_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct folio *folio, struct page *page,
 	struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 
 	BUG_ON(!anon_vma);
 
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		goto out;
 
 	/*
@@ -1155,14 +1156,14 @@ static void __page_set_anon_rmap(struct page *page,
 	anon_vma = anon_vma->root;
 
 	/*
-	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
+	 * page_idle does a lockless/optimistic rmap scan on folio->mapping.
 	 * Make sure the compiler doesn't split the stores of anon_vma and
 	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
 	 * could mistake the mapping for a struct address_space and crash.
 	 */
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
-	page->index = linear_page_index(vma, address);
+	WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
+	folio->index = linear_page_index(vma, address);
 out:
 	if (exclusive)
 		SetPageAnonExclusive(page);
@@ -1254,7 +1255,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	if (likely(!folio_test_ksm(folio))) {
 		/* address might be in next vma when migration races vma_adjust */
 		if (first)
-			__page_set_anon_rmap(page, vma, address,
+			__page_set_anon_rmap(folio, page, vma, address,
 					     !!(flags & RMAP_EXCLUSIVE));
 		else
 			__page_check_anon_rmap(page, vma, address);
@@ -1297,7 +1298,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(&folio->page, vma, address, 1);
+	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
 /**
@@ -2528,7 +2529,7 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
 	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
 	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 	if (first)
-		__page_set_anon_rmap(page, vma, address,
+		__page_set_anon_rmap(folio, page, vma, address,
 				     !!(flags & RMAP_EXCLUSIVE));
 }
 
@@ -2541,6 +2542,6 @@ void hugepage_add_new_anon_rmap(struct page *page,
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
 	folio_clear_hugetlb_restore_reserve(folio);
-	__page_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(folio, page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
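
[Editor's note: on the WRITE_ONCE() comment in the first hunk:
folio->mapping is a tagged pointer, and a torn store could expose the
anon_vma address without its type bit set. Below is a standalone,
userspace sketch of the tagging scheme; the pointer value is fake and
only the PAGE_MAPPING_ANON name mirrors the kernel's.]

	#include <stdio.h>

	#define PAGE_MAPPING_ANON 0x1UL

	int main(void)
	{
		unsigned long anon_vma = 0xffff888012345000UL; /* fake pointer */

		/* Store the pointer with the type bit set, as the patch does. */
		unsigned long mapping = anon_vma + PAGE_MAPPING_ANON;

		/* A lockless reader recovers both the type bit and the pointer;
		 * if the two halves of the store were split by the compiler,
		 * a reader could see the address with the bit still clear and
		 * mistake it for a struct address_space. */
		printf("is anon:  %lu\n", mapping & PAGE_MAPPING_ANON);
		printf("anon_vma: %#lx\n", mapping & ~PAGE_MAPPING_ANON);
		return 0;
	}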