author    Linus Torvalds <torvalds@linux-foundation.org>  2022-03-22 17:03:12 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-03-22 17:03:12 -0700
commit    9030fb0bb9d607908d51f9ee02efdbe01da355ee (patch)
tree      9ee1d9d47fbb4b30c7f5cbc291432e666e58967a /mm/hugetlb.c
parent    3bf03b9a0839c9fb06927ae53ebd0f960b19d408 (diff)
parent    2a3c4bce3edb0d54983384aa8a88c0da330638f4 (diff)
Merge tag 'folio-5.18c' of git://git.infradead.org/users/willy/pagecache
Pull folio updates from Matthew Wilcox:

 - Rewrite how munlock works to massively reduce the contention on
   i_mmap_rwsem (Hugh Dickins):
   https://lore.kernel.org/linux-mm/8e4356d-9622-a7f0-b2c-f116b5f2efea@google.com/

 - Sort out the page refcount mess for ZONE_DEVICE pages (Christoph Hellwig):
   https://lore.kernel.org/linux-mm/20220210072828.2930359-1-hch@lst.de/

 - Convert GUP to use folios and make pincount available for order-1
   pages (Matthew Wilcox)

 - Convert a few more truncation functions to use folios (Matthew Wilcox)

 - Convert page_vma_mapped_walk to use PFNs instead of pages (Matthew Wilcox)

 - Convert rmap_walk to use folios (Matthew Wilcox)

 - Convert most of shrink_page_list() to use a folio (Matthew Wilcox)

 - Add support for creating large folios in readahead (Matthew Wilcox)

* tag 'folio-5.18c' of git://git.infradead.org/users/willy/pagecache: (114 commits)
  mm/damon: minor cleanup for damon_pa_young
  selftests/vm/transhuge-stress: Support file-backed PMD folios
  mm/filemap: Support VM_HUGEPAGE for file mappings
  mm/readahead: Switch to page_cache_ra_order
  mm/readahead: Align file mappings for non-DAX
  mm/readahead: Add large folio readahead
  mm: Support arbitrary THP sizes
  mm: Make large folios depend on THP
  mm: Fix READ_ONLY_THP warning
  mm/filemap: Allow large folios to be added to the page cache
  mm: Turn can_split_huge_page() into can_split_folio()
  mm/vmscan: Convert pageout() to take a folio
  mm/vmscan: Turn page_check_references() into folio_check_references()
  mm/vmscan: Account large folios correctly
  mm/vmscan: Optimise shrink_page_list for non-PMD-sized folios
  mm/vmscan: Free non-shmem folios without splitting them
  mm/rmap: Constify the rmap_walk_control argument
  mm/rmap: Convert rmap_walk() to take a folio
  mm: Turn page_anon_vma() into folio_anon_vma()
  mm/rmap: Turn page_lock_anon_vma_read() into folio_lock_anon_vma_read()
  ...
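The unifying pattern in these conversions is replacing repeated compound_head() lookups on struct page with a struct folio, which by construction never points at a tail page. A minimal sketch of that pattern, using accessors that exist as of this merge (page_folio(), folio_test_anon(), folio_nr_pages()); the helper itself is hypothetical, for illustration only:

    /* Hypothetical helper: resolve a possibly-tail page to its folio
     * once, then query the folio instead of re-deriving the head page
     * at every call site.
     */
    static unsigned long nr_pages_if_anon(struct page *page)
    {
    	struct folio *folio = page_folio(page);	/* never a tail page */

    	if (folio_test_anon(folio))	/* was: PageAnon(compound_head(page)) */
    		return folio_nr_pages(folio);
    	return 0;
    }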
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 15 +++++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 75b41879e9e9..b34f50156f7e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1321,7 +1321,9 @@ static void __destroy_compound_gigantic_page(struct page *page,
}
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
}
@@ -1813,7 +1815,9 @@ out_error:
for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
__ClearPageReserved(p);
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
return false;
}
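Both hunks above add the same guard: with this series, struct page carries a compound_nr field only on 64-bit (the GUP pincount rework reshuffles the first tail page), while 32-bit derives the count from compound_order, so there is no field to clear. A minimal sketch of the read side under that assumption, named _sketch to avoid clashing with the real compound_nr() in <linux/mm.h>, whose 5.18 shape it mirrors:

    /* On 64-bit, compound_nr is cached in the first tail page and must
     * be kept in sync (hence the guarded stores above); on 32-bit it is
     * recomputed from compound_order, so there is nothing to zero.
     */
    static inline unsigned long compound_nr_sketch(struct page *page)
    {
    	if (!PageHead(page))
    		return 1;
    #ifdef CONFIG_64BIT
    	return page[1].compound_nr;
    #else
    	return 1UL << compound_order(page);
    #endif
    }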
@@ -5013,7 +5017,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
set_page_dirty(page);
hugetlb_count_sub(pages_per_huge_page(h), mm);
- page_remove_rmap(page, true);
+ page_remove_rmap(page, vma, true);
spin_unlock(ptl);
tlb_remove_page_size(tlb, page, huge_page_size(h));
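This hunk and the COW hunk below pick up the new rmap signature from the munlock rewrite: page_remove_rmap() now takes the vma so rmap can do the mlock/munlock bookkeeping itself instead of relying on every caller. The 5.18 prototype, for reference:

    /* The added vma argument lets rmap handle munlock accounting
     * internally; 'compound' stays true in these callers because
     * hugetlb maps whole huge pages.
     */
    void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
    		      bool compound);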
@@ -5258,7 +5262,7 @@ retry_avoidcopy:
/* Break COW */
huge_ptep_clear_flush(vma, haddr, ptep);
mmu_notifier_invalidate_range(mm, range.start, range.end);
- page_remove_rmap(old_page, true);
+ page_remove_rmap(old_page, vma, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
set_huge_pte_at(mm, haddr, ptep,
make_huge_pte(vma, new_page, 1));
@@ -6074,7 +6078,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (pages) {
/*
- * try_grab_compound_head() should always succeed here,
+ * try_grab_folio() should always succeed here,
* because: a) we hold the ptl lock, and b) we've just
* checked that the huge page is present in the page
* tables. If the huge page is present, then the tail
@@ -6083,9 +6087,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
* any way. So this page must be available at this
* point, unless the page refcount overflowed:
*/
- if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
- refs,
- flags))) {
+ if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs,
+ flags))) {
spin_unlock(ptl);
remainder = 0;
err = -ENOMEM;
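try_grab_folio() is the folio-based replacement for try_grab_compound_head(): it takes 'refs' references and returns the folio rather than the head page, or NULL when the references cannot be taken (e.g. refcount saturation), which is all this caller checks for. Its 5.18 prototype in mm/gup.c, for reference:

    /* Returns the folio on which 'refs' references were grabbed,
     * or NULL if the references could not be taken.
     */
    struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);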