path: root/mm/filemap.c
author	Hugh Dickins <hughd@google.com>	2022-03-24 18:09:52 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 19:06:45 -0700
commit	85207ad8ea21156387fd0273e5360189df163661
tree	3740f14d9c0660c16bdc64fdfaf0c7134942ba5c /mm/filemap.c
parent	bb43b14b576228c580bdc7e1aeeded54d540b5ef
mm: filemap_unaccount_folio() large skip mapcount fixup
The page_mapcount_reset() when folio_mapped() while mapping_exiting() was
devised long before there were huge or compound pages in the cache.  It is
still valid for small pages, but not at all clear what's right to check and
reset on large pages.  Just don't try when folio_test_large().

Link: https://lkml.kernel.org/r/879c4426-4122-da9c-1a86-697f2c9a083@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 98dfeff82ef0..c5c169c16bae 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -152,25 +152,25 @@ static void filemap_unaccount_folio(struct address_space *mapping,
 
 	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
 	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
-		int mapcount;
-
 		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
 			 current->comm, folio_pfn(folio));
 		dump_page(&folio->page, "still mapped when deleted");
 		dump_stack();
 		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 
-		mapcount = page_mapcount(&folio->page);
-		if (mapping_exiting(mapping) &&
-		    folio_ref_count(folio) >= mapcount + 2) {
-			/*
-			 * All vmas have already been torn down, so it's
-			 * a good bet that actually the folio is unmapped,
-			 * and we'd prefer not to leak it: if we're wrong,
-			 * some other bad page check should catch it later.
-			 */
-			page_mapcount_reset(&folio->page);
-			folio_ref_sub(folio, mapcount);
+		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
+			int mapcount = page_mapcount(&folio->page);
+
+			if (folio_ref_count(folio) >= mapcount + 2) {
+				/*
+				 * All vmas have already been torn down, so it's
+				 * a good bet that actually the page is unmapped
+				 * and we'd rather not leak it: if we're wrong,
+				 * another bad page check should catch it later.
+				 */
+				page_mapcount_reset(&folio->page);
+				folio_ref_sub(folio, mapcount);
+			}
 		}
 	}
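For reference, this is how the fixup block reads once the patch is applied (an excerpt assembled from the hunk above, shown out of its kernel context, so it is illustrative rather than independently compilable; the comment above the folio_test_large() check is added here for clarity and is not part of the patch):

	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		/*
		 * Skip the fixup entirely for large folios: the mapcount
		 * arithmetic below was only ever designed for small pages.
		 */
		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = page_mapcount(&folio->page);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the page is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				page_mapcount_reset(&folio->page);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

The mapcount + 2 threshold is, roughly, one reference per mapping plus the page cache's own reference and the caller's: if the refcount covers at least that, the leftover mapcount is likely stale and can be reset without freeing a page that is genuinely still in use.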