path: root/mm/internal.h
author	Vishal Moola (Oracle) <vishal.moola@gmail.com>	2024-09-14 12:41:18 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2024-09-17 00:58:04 -0700
commit	2a058ab3286d6475b2082b90c2d2182d2fea4b39 (patch)
tree	e521b1aa290facd3b3dc9d63a662be1520450aca /mm/internal.h
parent	b4afe4183ec77f230851ea139d91e5cf2644c68b (diff)
mm: change vmf_anon_prepare() to __vmf_anon_prepare()
Some callers of vmf_anon_prepare() may not want us to release the per-VMA lock ourselves. Rename vmf_anon_prepare() to __vmf_anon_prepare() and let the callers drop the lock when desired. Also, make vmf_anon_prepare() a wrapper that releases the per-VMA lock itself for any callers that don't care.

This is in preparation to fix this bug reported by syzbot:
https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/

Link: https://lkml.kernel.org/r/20240914194243.245-1-vishal.moola@gmail.com
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
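To make the resulting calling convention concrete, here is a minimal sketch of a caller that needs the per-VMA lock to stay held across its own cleanup. Note that example_fault() is hypothetical and not part of this patch; only __vmf_anon_prepare(), vmf_anon_prepare(), VM_FAULT_RETRY, and vma_end_read() come from the kernel itself:

	/*
	 * Hypothetical caller sketch.  With plain vmf_anon_prepare() the
	 * per-VMA lock would already be released by the time VM_FAULT_RETRY
	 * is returned; __vmf_anon_prepare() leaves dropping it to the caller.
	 */
	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		vm_fault_t ret = __vmf_anon_prepare(vmf);

		if (unlikely(ret & VM_FAULT_RETRY)) {
			/* caller-specific teardown that needs the lock held */
			vma_end_read(vmf->vma);	/* then drop it ourselves */
		}
		return ret;
	}

Callers with no such ordering requirement simply keep calling vmf_anon_prepare() and see no behavior change.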
Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/internal.h b/mm/internal.h
index b4d86436565b..a963f67d3452 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -310,7 +310,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 	wake_up(wqh);
 }
 
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+	vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+	if (unlikely(ret & VM_FAULT_RETRY))
+		vma_end_read(vmf->vma);
+	return ret;
+}
+
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
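An observation on the shape of this change (not part of the patch itself): because vmf_anon_prepare() becomes a static inline wrapper in mm/internal.h, every existing caller keeps the old release-on-retry behavior with no callsite edits, while the new __vmf_anon_prepare() entry point gives paths such as the hugetlb fault handlers (see the Fixes tag) the option to order vma_end_read() after their own unlocking.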