Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 325
1 file changed, 158 insertions, 167 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index b616870a09be..15ae24585fc4 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -262,11 +262,12 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
  *
- * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
- * anon_vma_fork(). The first three want an exact copy of src, while the last
- * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
- * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
- * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
+ * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(),
+ * copy_vma() and anon_vma_fork(). The first four want an exact copy of src,
+ * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to
+ * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before
+ * call, we can identify this case by checking (!dst->anon_vma &&
+ * src->anon_vma).
  *
  * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
  * and reuse existing anon_vma which has no vmas and only one child anon_vma.
@@ -823,25 +824,14 @@ static bool folio_referenced_one(struct folio *folio,
         }
 
         if (pvmw.pte) {
-            if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
-                !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
+            if (lru_gen_enabled() && pte_young(*pvmw.pte)) {
                 lru_gen_look_around(&pvmw);
                 referenced++;
             }
 
             if (ptep_clear_flush_young_notify(vma, address,
-                        pvmw.pte)) {
-                /*
-                 * Don't treat a reference through
-                 * a sequentially read mapping as such.
-                 * If the folio has been used in another mapping,
-                 * we will catch it; if this other mapping is
-                 * already gone, the unmap path will have set
-                 * the referenced flag or activated the folio.
-                 */
-                if (likely(!(vma->vm_flags & VM_SEQ_READ)))
-                    referenced++;
-            }
+                        pvmw.pte))
+                referenced++;
         } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
             if (pmdp_clear_flush_young_notify(vma, address,
                         pvmw.pmd))
@@ -875,7 +865,20 @@ static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
     struct folio_referenced_arg *pra = arg;
     struct mem_cgroup *memcg = pra->memcg;
 
-    if (!mm_match_cgroup(vma->vm_mm, memcg))
+    /*
+     * Ignore references from this mapping if it has no recency. If the
+     * folio has been used in another mapping, we will catch it; if this
+     * other mapping is already gone, the unmap path will have set the
+     * referenced flag or activated the folio in zap_pte_range().
+     */
+    if (!vma_has_recency(vma))
+        return true;
+
+    /*
+     * If we are reclaiming on behalf of a cgroup, skip counting on behalf
+     * of references from different cgroups.
+     */
+    if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
         return true;
 
     return false;
@@ -906,6 +909,7 @@ int folio_referenced(struct folio *folio, int is_locked,
         .arg = (void *)&pra,
         .anon_lock = folio_lock_anon_vma_read,
         .try_lock = true,
+        .invalid_vma = invalid_folio_referenced_vma,
     };
 
     *vm_flags = 0;
@@ -921,15 +925,6 @@ int folio_referenced(struct folio *folio, int is_locked,
             return 1;
     }
 
-    /*
-     * If we are reclaiming on behalf of a cgroup, skip
-     * counting on behalf of references from different
-     * cgroups
-     */
-    if (memcg) {
-        rwc.invalid_vma = invalid_folio_referenced_vma;
-    }
-
     rmap_walk(folio, &rwc);
     *vm_flags = pra.vm_flags;
 
@@ -950,9 +945,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
     * We have to assume the worse case ie pmd for invalidation. Note that
     * the folio can not be freed from this function.
     */
-    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
-                0, vma, vma->vm_mm, address,
-                vma_address_end(pvmw));
+    mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
+                vma->vm_mm, address, vma_address_end(pvmw));
     mmu_notifier_invalidate_range_start(&range);
 
     while (page_vma_mapped_walk(pvmw)) {
@@ -1085,26 +1079,26 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
     return page_vma_mkclean_one(&pvmw);
 }
 
-int total_compound_mapcount(struct page *head)
+int folio_total_mapcount(struct folio *folio)
 {
-    int mapcount = head_compound_mapcount(head);
-    int nr_subpages;
+    int mapcount = folio_entire_mapcount(folio);
+    int nr_pages;
     int i;
 
-    /* In the common case, avoid the loop when no subpages mapped by PTE */
-    if (head_subpages_mapcount(head) == 0)
+    /* In the common case, avoid the loop when no pages mapped by PTE */
+    if (folio_nr_pages_mapped(folio) == 0)
         return mapcount;
     /*
-     * Add all the PTE mappings of those subpages mapped by PTE.
-     * Limit the loop, knowing that only subpages_mapcount are mapped?
+     * Add all the PTE mappings of those pages mapped by PTE.
+     * Limit the loop to folio_nr_pages_mapped()?
     * Perhaps: given all the raciness, that may be a good or a bad idea.
     */
-    nr_subpages = thp_nr_pages(head);
-    for (i = 0; i < nr_subpages; i++)
-        mapcount += atomic_read(&head[i]._mapcount);
+    nr_pages = folio_nr_pages(folio);
+    for (i = 0; i < nr_pages; i++)
+        mapcount += atomic_read(&folio_page(folio, i)->_mapcount);
 
     /* But each of those _mapcounts was based on -1 */
-    mapcount += nr_subpages;
+    mapcount += nr_pages;
     return mapcount;
 }
@@ -1138,19 +1132,20 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page: Page or Hugepage to add to rmap
+ * @folio: Folio which contains page.
+ * @page: Page to add to rmap.
  * @vma: VM area to add page to.
  * @address: User virtual address of the mapping
  * @exclusive: the page is exclusively owned by the current process
  */
-static void __page_set_anon_rmap(struct page *page,
+static void __page_set_anon_rmap(struct folio *folio, struct page *page,
     struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
     struct anon_vma *anon_vma = vma->anon_vma;
 
     BUG_ON(!anon_vma);
 
-    if (PageAnon(page))
+    if (folio_test_anon(folio))
         goto out;
 
     /*
@@ -1162,14 +1157,14 @@ static void __page_set_anon_rmap(struct page *page,
         anon_vma = anon_vma->root;
 
     /*
-     * page_idle does a lockless/optimistic rmap scan on page->mapping.
+     * page_idle does a lockless/optimistic rmap scan on folio->mapping.
     * Make sure the compiler doesn't split the stores of anon_vma and
     * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
     * could mistake the mapping for a struct address_space and crash.
     */
     anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-    WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
-    page->index = linear_page_index(vma, address);
+    WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma);
+    folio->index = linear_page_index(vma, address);
 out:
     if (exclusive)
         SetPageAnonExclusive(page);
@@ -1214,36 +1209,32 @@ static void __page_check_anon_rmap(struct page *page,
  * and to ensure that PageAnon is not being upgraded racily to PageKsm
  * (but PageKsm is never downgraded to PageAnon).
  */
-void page_add_anon_rmap(struct page *page,
-    struct vm_area_struct *vma, unsigned long address, rmap_t flags)
+void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+        unsigned long address, rmap_t flags)
 {
-    atomic_t *mapped;
+    struct folio *folio = page_folio(page);
+    atomic_t *mapped = &folio->_nr_pages_mapped;
     int nr = 0, nr_pmdmapped = 0;
     bool compound = flags & RMAP_COMPOUND;
     bool first = true;
 
-    if (unlikely(PageKsm(page)))
-        lock_page_memcg(page);
-
     /* Is page being mapped by PTE? Is this its first map to be added? */
     if (likely(!compound)) {
         first = atomic_inc_and_test(&page->_mapcount);
         nr = first;
-        if (first && PageCompound(page)) {
-            mapped = subpages_mapcount_ptr(compound_head(page));
+        if (first && folio_test_large(folio)) {
             nr = atomic_inc_return_relaxed(mapped);
             nr = (nr < COMPOUND_MAPPED);
         }
-    } else if (PageTransHuge(page)) {
+    } else if (folio_test_pmd_mappable(folio)) {
         /* That test is redundant: it's for safety or to optimize out */
-        first = atomic_inc_and_test(compound_mapcount_ptr(page));
+        first = atomic_inc_and_test(&folio->_entire_mapcount);
         if (first) {
-            mapped = subpages_mapcount_ptr(page);
             nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
             if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-                nr_pmdmapped = thp_nr_pages(page);
-                nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+                nr_pmdmapped = folio_nr_pages(folio);
+                nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
                 /* Raced ahead of a remove and another add? */
                 if (unlikely(nr < 0))
                     nr = 0;
@@ -1258,59 +1249,57 @@ void page_add_anon_rmap(struct page *page,
     VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
 
     if (nr_pmdmapped)
-        __mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
+        __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
     if (nr)
-        __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
+        __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
-    if (unlikely(PageKsm(page)))
-        unlock_page_memcg(page);
-
-    /* address might be in next vma when migration races vma_adjust */
-    else if (first)
-        __page_set_anon_rmap(page, vma, address,
-                     !!(flags & RMAP_EXCLUSIVE));
-    else
-        __page_check_anon_rmap(page, vma, address);
+    if (likely(!folio_test_ksm(folio))) {
+        /* address might be in next vma when migration races vma_merge */
+        if (first)
+            __page_set_anon_rmap(folio, page, vma, address,
+                         !!(flags & RMAP_EXCLUSIVE));
+        else
+            __page_check_anon_rmap(page, vma, address);
+    }
 
-    mlock_vma_page(page, vma, compound);
+    mlock_vma_folio(folio, vma, compound);
 }
 
 /**
- * page_add_new_anon_rmap - add mapping to a new anonymous page
- * @page: the page to add the mapping to
+ * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
+ * @folio: The folio to add the mapping to.
  * @vma: the vm area in which the mapping is added
  * @address: the user virtual address mapped
  *
- * If it's a compound page, it is accounted as a compound page. As the page
- * is new, it's assume to get mapped exclusively by a single process.
- *
- * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * Like page_add_anon_rmap() but must only be called on *new* folios.
  * This means the inc-and-test can be bypassed.
- * Page does not have to be locked.
+ * The folio does not have to be locked.
+ *
+ * If the folio is large, it is accounted as a THP. As the folio
+ * is new, it's assumed to be mapped exclusively by a single process.
  */
-void page_add_new_anon_rmap(struct page *page,
-    struct vm_area_struct *vma, unsigned long address)
+void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
+        unsigned long address)
 {
     int nr;
 
     VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
-    __SetPageSwapBacked(page);
+    __folio_set_swapbacked(folio);
 
-    if (likely(!PageCompound(page))) {
+    if (likely(!folio_test_pmd_mappable(folio))) {
         /* increment count (starts at -1) */
-        atomic_set(&page->_mapcount, 0);
+        atomic_set(&folio->_mapcount, 0);
         nr = 1;
     } else {
-        VM_BUG_ON_PAGE(!PageTransHuge(page), page);
         /* increment count (starts at -1) */
-        atomic_set(compound_mapcount_ptr(page), 0);
-        atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
-        nr = thp_nr_pages(page);
-        __mod_lruvec_page_state(page, NR_ANON_THPS, nr);
+        atomic_set(&folio->_entire_mapcount, 0);
+        atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
+        nr = folio_nr_pages(folio);
+        __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
     }
 
-    __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
-    __page_set_anon_rmap(page, vma, address, 1);
+    __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+    __page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
 /**
@@ -1321,35 +1310,33 @@ void page_add_new_anon_rmap(struct page *page,
  *
 * The caller needs to hold the pte lock.
 */
-void page_add_file_rmap(struct page *page,
-    struct vm_area_struct *vma, bool compound)
+void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
+        bool compound)
 {
-    atomic_t *mapped;
+    struct folio *folio = page_folio(page);
+    atomic_t *mapped = &folio->_nr_pages_mapped;
     int nr = 0, nr_pmdmapped = 0;
     bool first;
 
     VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
-    lock_page_memcg(page);
 
     /* Is page being mapped by PTE? Is this its first map to be added? */
     if (likely(!compound)) {
         first = atomic_inc_and_test(&page->_mapcount);
         nr = first;
-        if (first && PageCompound(page)) {
-            mapped = subpages_mapcount_ptr(compound_head(page));
+        if (first && folio_test_large(folio)) {
             nr = atomic_inc_return_relaxed(mapped);
             nr = (nr < COMPOUND_MAPPED);
         }
-    } else if (PageTransHuge(page)) {
+    } else if (folio_test_pmd_mappable(folio)) {
         /* That test is redundant: it's for safety or to optimize out */
-        first = atomic_inc_and_test(compound_mapcount_ptr(page));
+        first = atomic_inc_and_test(&folio->_entire_mapcount);
         if (first) {
-            mapped = subpages_mapcount_ptr(page);
             nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
             if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-                nr_pmdmapped = thp_nr_pages(page);
-                nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+                nr_pmdmapped = folio_nr_pages(folio);
+                nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
                 /* Raced ahead of a remove and another add? */
                 if (unlikely(nr < 0))
                     nr = 0;
@@ -1361,13 +1348,12 @@ void page_add_file_rmap(struct page *page,
     }
 
     if (nr_pmdmapped)
-        __mod_lruvec_page_state(page, PageSwapBacked(page) ?
+        __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
             NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
     if (nr)
-        __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
-    unlock_page_memcg(page);
+        __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
 
-    mlock_vma_page(page, vma, compound);
+    mlock_vma_folio(folio, vma, compound);
 }
 
 /**
@@ -1378,43 +1364,41 @@ void page_add_file_rmap(struct page *page,
  *
 * The caller needs to hold the pte lock.
 */
-void page_remove_rmap(struct page *page,
-    struct vm_area_struct *vma, bool compound)
+void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
+        bool compound)
 {
-    atomic_t *mapped;
+    struct folio *folio = page_folio(page);
+    atomic_t *mapped = &folio->_nr_pages_mapped;
    int nr = 0, nr_pmdmapped = 0;
    bool last;
+    enum node_stat_item idx;
 
     VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 
     /* Hugetlb pages are not counted in NR_*MAPPED */
-    if (unlikely(PageHuge(page))) {
+    if (unlikely(folio_test_hugetlb(folio))) {
         /* hugetlb pages are always mapped with pmds */
-        atomic_dec(compound_mapcount_ptr(page));
+        atomic_dec(&folio->_entire_mapcount);
         return;
     }
 
-    lock_page_memcg(page);
-
     /* Is page being unmapped by PTE? Is this its last map to be removed? */
     if (likely(!compound)) {
         last = atomic_add_negative(-1, &page->_mapcount);
         nr = last;
-        if (last && PageCompound(page)) {
-            mapped = subpages_mapcount_ptr(compound_head(page));
+        if (last && folio_test_large(folio)) {
            nr = atomic_dec_return_relaxed(mapped);
            nr = (nr < COMPOUND_MAPPED);
        }
-    } else if (PageTransHuge(page)) {
+    } else if (folio_test_pmd_mappable(folio)) {
        /* That test is redundant: it's for safety or to optimize out */
-        last = atomic_add_negative(-1, compound_mapcount_ptr(page));
+        last = atomic_add_negative(-1, &folio->_entire_mapcount);
        if (last) {
-            mapped = subpages_mapcount_ptr(page);
            nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
            if (likely(nr < COMPOUND_MAPPED)) {
-                nr_pmdmapped = thp_nr_pages(page);
-                nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+                nr_pmdmapped = folio_nr_pages(folio);
+                nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
                /* Raced ahead of another remove and an add? */
                if (unlikely(nr < 0))
                    nr = 0;
@@ -1426,34 +1410,37 @@ void page_remove_rmap(struct page *page,
    }
 
    if (nr_pmdmapped) {
-        __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
-                (PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
-                NR_FILE_PMDMAPPED), -nr_pmdmapped);
+        if (folio_test_anon(folio))
+            idx = NR_ANON_THPS;
+        else if (folio_test_swapbacked(folio))
+            idx = NR_SHMEM_PMDMAPPED;
+        else
+            idx = NR_FILE_PMDMAPPED;
+        __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
    }
    if (nr) {
-        __mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_MAPPED :
-                NR_FILE_MAPPED, -nr);
+        idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
+        __lruvec_stat_mod_folio(folio, idx, -nr);
+
        /*
-         * Queue anon THP for deferred split if at least one small
-         * page of the compound page is unmapped, but at least one
-         * small page is still mapped.
+         * Queue anon THP for deferred split if at least one
+         * page of the folio is unmapped and at least one page
+         * is still mapped.
        */
-        if (PageTransCompound(page) && PageAnon(page))
+        if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
            if (!compound || nr < nr_pmdmapped)
-                deferred_split_huge_page(compound_head(page));
+                deferred_split_folio(folio);
    }
 
    /*
-     * It would be tidy to reset PageAnon mapping when fully unmapped,
-     * but that might overwrite a racing page_add_anon_rmap
-     * which increments mapcount after us but sets mapping
-     * before us: so leave the reset to free_pages_prepare,
-     * and remember that it's only reliable while mapped.
+     * It would be tidy to reset folio_test_anon mapping when fully
+     * unmapped, but that might overwrite a racing page_add_anon_rmap
+     * which increments mapcount after us but sets mapping before us:
+     * so leave the reset to free_pages_prepare, and remember that
+     * it's only reliable while mapped.
    */
-    unlock_page_memcg(page);
-
-    munlock_vma_page(page, vma, compound);
+    munlock_vma_folio(folio, vma, compound);
 }
 
 /*
@@ -1491,7 +1478,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
    * try_to_unmap() must hold a reference on the folio.
    */
    range.end = vma_address_end(&pvmw);
-    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                address, range.end);
    if (folio_test_hugetlb(folio)) {
        /*
@@ -1725,17 +1712,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
-            /*
-             * Note: We *don't* remember if the page was mapped
-             * exclusively in the swap pte if the architecture
-             * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
-             * that case, swapin code has to re-determine that
-             * manually and might detect the page as possibly
-             * shared, for example, if there are other references on
-             * the page or if the page is under writeback. We made
-             * sure that there are no GUP pins on the page that
-             * would rely on it, so for GUP pins this is fine.
-             */
            if (list_empty(&mm->mmlist)) {
                spin_lock(&mmlist_lock);
                if (list_empty(&mm->mmlist))
@@ -1779,7 +1755,7 @@ discard:
        */
        page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
        if (vma->vm_flags & VM_LOCKED)
-            mlock_page_drain_local();
+            mlock_drain_local();
        folio_put(folio);
    }
 
@@ -1866,7 +1842,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
    * try_to_unmap() must hold a reference on the page.
    */
    range.end = vma_address_end(&pvmw);
-    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+    mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
                address, range.end);
    if (folio_test_hugetlb(folio)) {
        /*
@@ -1976,7 +1952,21 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
        } else {
            flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
            /* Nuke the page table entry. */
-            pteval = ptep_clear_flush(vma, address, pvmw.pte);
+            if (should_defer_flush(mm, flags)) {
+                /*
+                 * We clear the PTE but do not flush so potentially
+                 * a remote CPU could still be writing to the folio.
+                 * If the entry was previously clean then the
+                 * architecture must guarantee that a clear->dirty
+                 * transition on a cached TLB entry is written through
+                 * and traps if the PTE is unmapped.
+                 */
+                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
+
+                set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
+            } else {
+                pteval = ptep_clear_flush(vma, address, pvmw.pte);
+            }
        }
 
        /* Set the dirty flag on the folio now the pte is gone. */
@@ -2120,7 +2110,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
        */
        page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
        if (vma->vm_flags & VM_LOCKED)
-            mlock_page_drain_local();
+            mlock_drain_local();
        folio_put(folio);
    }
 
@@ -2148,10 +2138,10 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 
    /*
    * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and
-     * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
+     * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags.
    */
    if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
-                    TTU_SYNC)))
+                    TTU_SYNC | TTU_BATCH_FLUSH)))
        return;
 
    if (folio_is_zone_device(folio) &&
@@ -2196,7 +2186,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
    swp_entry_t entry;
    pte_t swp_pte;
 
-    mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+    mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
                      vma->vm_mm, address, min(vma->vm_end,
                      address + folio_size(folio)),
                      args->owner);
@@ -2543,27 +2533,28 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                unsigned long address, rmap_t flags)
 {
+    struct folio *folio = page_folio(page);
    struct anon_vma *anon_vma = vma->anon_vma;
    int first;
 
-    BUG_ON(!PageLocked(page));
+    BUG_ON(!folio_test_locked(folio));
    BUG_ON(!anon_vma);
-    /* address might be in next vma when migration races vma_adjust */
-    first = atomic_inc_and_test(compound_mapcount_ptr(page));
+    /* address might be in next vma when migration races vma_merge */
+    first = atomic_inc_and_test(&folio->_entire_mapcount);
    VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
    VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
    if (first)
-        __page_set_anon_rmap(page, vma, address,
+        __page_set_anon_rmap(folio, page, vma, address,
                     !!(flags & RMAP_EXCLUSIVE));
 }
 
-void hugepage_add_new_anon_rmap(struct page *page,
+void hugepage_add_new_anon_rmap(struct folio *folio,
            struct vm_area_struct *vma, unsigned long address)
 {
    BUG_ON(address < vma->vm_start || address >= vma->vm_end);
    /* increment count (starts at -1) */
-    atomic_set(compound_mapcount_ptr(page), 0);
-    ClearHPageRestoreReserve(page);
-    __page_set_anon_rmap(page, vma, address, 1);
+    atomic_set(&folio->_entire_mapcount, 0);
+    folio_clear_hugetlb_restore_reserve(folio);
+    __page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
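
Illustration (editor's sketch, not part of the patch): the conversion above keeps the split mapcount scheme and renames it for folios: _entire_mapcount counts PMD-level ("entire") mappings, while _nr_pages_mapped counts PTE-mapped pages plus a COMPOUND_MAPPED bias per entire mapping, masked by FOLIO_PAGES_MAPPED. The stand-alone user-space C model below mirrors how folio_total_mapcount() in the diff sums these counters; the struct, constants and helper names are simplified stand-ins chosen for illustration, not the kernel's definitions.

/*
 * Minimal user-space model of the two-counter mapcount scheme used in the
 * diff above.  Constants and fields are simplified stand-ins, assumed to
 * mirror the kernel's COMPOUND_MAPPED / FOLIO_PAGES_MAPPED only in spirit.
 */
#include <stdio.h>

#define COMPOUND_MAPPED     0x800000            /* bias per PMD (entire) map */
#define FOLIO_PAGES_MAPPED  (COMPOUND_MAPPED - 1)  /* low bits: PTE-mapped pages */
#define NR_PAGES            512                 /* a PMD-sized folio on x86-64 */

struct toy_folio {
    int entire_mapcount;           /* like _entire_mapcount, starts at -1 */
    int nr_pages_mapped;           /* like _nr_pages_mapped */
    int page_mapcount[NR_PAGES];   /* per-page _mapcount, each starts at -1 */
};

/* Mirrors the summation done by folio_total_mapcount() in the diff. */
static int toy_total_mapcount(const struct toy_folio *f)
{
    int mapcount = f->entire_mapcount + 1;  /* folio_entire_mapcount(): undo the -1 start */
    int i;

    /* No pages mapped by PTE: skip the loop, as the kernel version does. */
    if ((f->nr_pages_mapped & FOLIO_PAGES_MAPPED) == 0)
        return mapcount;

    for (i = 0; i < NR_PAGES; i++)
        mapcount += f->page_mapcount[i];
    return mapcount + NR_PAGES;     /* each per-page _mapcount was based on -1 */
}

int main(void)
{
    struct toy_folio f = { .entire_mapcount = -1 };
    int i;

    for (i = 0; i < NR_PAGES; i++)
        f.page_mapcount[i] = -1;

    /* One PMD mapping, as in page_add_anon_rmap(..., RMAP_COMPOUND). */
    f.entire_mapcount++;
    f.nr_pages_mapped += COMPOUND_MAPPED;

    /* Two pages of the folio additionally mapped by PTE elsewhere. */
    f.page_mapcount[0]++;
    f.page_mapcount[5]++;
    f.nr_pages_mapped += 2;

    /* 1 entire mapping + 2 PTE mappings = 3 */
    printf("total mapcount = %d\n", toy_total_mapcount(&f));
    return 0;
}

Keeping the PTE count and the COMPOUND_MAPPED bias in one atomic is what lets the add/remove paths in the diff derive nr and nr_pmdmapped for the NR_ANON_MAPPED / NR_FILE_MAPPED and *_PMDMAPPED statistics from a single atomic_add_return/atomic_sub_return, without walking every page of the folio.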