author    David Hildenbrand <david@redhat.com>          2023-09-13 14:51:12 +0200
committer Andrew Morton <akpm@linux-foundation.org>     2023-10-04 10:32:27 -0700
commit    132b180f06a74ddfc526709928036db3b7a1cf6d (patch)
tree      334eadd59db3cfef2d05b32f05438bd8378aebf0 /mm/rmap.c
parent    a1f34ee1de2c3a55bc2a6b9a38e1ecd2830dcc03 (diff)
mm/rmap: simplify PageAnonExclusive sanity checks when adding anon rmap
Let's sanity-check PageAnonExclusive vs. mapcount in page_add_anon_rmap()
and hugepage_add_anon_rmap() after setting PageAnonExclusive simply by
re-reading the mapcounts.

We can stop initializing the "first" variable in page_add_anon_rmap()
and no longer need an atomic_inc_and_test() in hugepage_add_anon_rmap().

While at it, switch to VM_WARN_ON_FOLIO().

[david@redhat.com: update check for doubly-mapped page]
  Link: https://lkml.kernel.org/r/d8e5a093-2e22-c14b-7e64-6da280398d9f@redhat.com
Link: https://lkml.kernel.org/r/20230913125113.313322-6-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
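The essential change is one of ordering: rather than capturing a "first" flag from an inc-and-test, the code now increments the mapcount, sets PageAnonExclusive when requested, and only then asserts the "exclusive implies singly mapped" invariant by re-reading the count. The following is a minimal userspace sketch of that pattern, not kernel code; the names (fake_folio, warn_on, fake_add_anon_rmap) are invented for illustration. Like _entire_mapcount, the counter starts at -1, so a re-read value above 0 means more than one mapping.

/* Simplified, userspace-only analogue of the check this patch switches to. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_folio {
        atomic_int entire_mapcount;     /* starts at -1, like folio->_entire_mapcount */
        bool exclusive;                 /* stand-in for PageAnonExclusive */
};

static void warn_on(bool cond, const char *msg)
{
        if (cond)
                fprintf(stderr, "warning: %s\n", msg);
}

static void fake_add_anon_rmap(struct fake_folio *f, bool exclusive)
{
        /* No inc-and-test needed: just increment the mapcount ... */
        atomic_fetch_add(&f->entire_mapcount, 1);
        if (exclusive)
                f->exclusive = true;
        /* ... and sanity-check afterwards by re-reading it. */
        warn_on(atomic_load(&f->entire_mapcount) > 0 && f->exclusive,
                "exclusive page is mapped more than once");
}

int main(void)
{
        struct fake_folio f = { .entire_mapcount = -1, .exclusive = false };

        fake_add_anon_rmap(&f, true);   /* first, exclusive mapping: silent */
        fake_add_anon_rmap(&f, false);  /* second mapping while exclusive: warns */
        return 0;
}

In the kernel the same check is expressed with VM_WARN_ON_FOLIO(), as the hunks below show.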
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  16
1 file changed, 8 insertions, 8 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 9b40c3feba3e..ed4b602bcbd5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1199,7 +1199,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
         atomic_t *mapped = &folio->_nr_pages_mapped;
         int nr = 0, nr_pmdmapped = 0;
         bool compound = flags & RMAP_COMPOUND;
-        bool first = true;
+        bool first;
 
         /* Is page being mapped by PTE? Is this its first map to be added? */
         if (likely(!compound)) {
@@ -1228,9 +1228,6 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                 }
         }
 
-        VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
-        VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
-
         if (nr_pmdmapped)
                 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped);
         if (nr)
@@ -1252,6 +1249,10 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
         }
         if (flags & RMAP_EXCLUSIVE)
                 SetPageAnonExclusive(page);
+        /* While PTE-mapping a THP we have a PMD and a PTE mapping. */
+        VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 ||
+                          (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) &&
+                         PageAnonExclusive(page), folio);
 
         mlock_vma_folio(folio, vma, compound);
 }
@@ -2545,15 +2546,14 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                             unsigned long address, rmap_t flags)
 {
         struct folio *folio = page_folio(page);
-        int first;
 
         VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
-        first = atomic_inc_and_test(&folio->_entire_mapcount);
-        VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
-        VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);
+        atomic_inc(&folio->_entire_mapcount);
         if (flags & RMAP_EXCLUSIVE)
                 SetPageAnonExclusive(page);
+        VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
+                         PageAnonExclusive(page), folio);
 }
 
 void hugepage_add_new_anon_rmap(struct folio *folio,