From 672aa27d0bd241759376e62b78abb8aae1792479 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 16 Jan 2023 19:28:26 +0000
Subject: mm: remove munlock_vma_page()

All callers now have a folio and can call munlock_vma_folio().  Update the
documentation to refer to munlock_vma_folio().

Link: https://lkml.kernel.org/r/20230116192827.2146732-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 Documentation/mm/unevictable-lru.rst |  4 ++--
 kernel/events/uprobes.c              |  1 -
 mm/internal.h                        |  8 --------
 mm/rmap.c                            | 12 ++++++------
 4 files changed, 8 insertions(+), 17 deletions(-)

diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst
index 9257235fe904..34b8b098c5bc 100644
--- a/Documentation/mm/unevictable-lru.rst
+++ b/Documentation/mm/unevictable-lru.rst
@@ -486,7 +486,7 @@ Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
 For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
-munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
 munlock_page() uses the mlock pagevec to batch up work to be done under
@@ -510,7 +510,7 @@ which had been Copied-On-Write from the file pages now being truncated.
 
 Mlocked pages can be munlocked and deleted in this way: like with munmap(),
 for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
-munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
 However, if there is a racing munlock(), since mlock_vma_pages_range() starts
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 29f36d2ae129..1a3904e0179c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -22,7 +22,6 @@
 #include <linux/swap.h>		/* folio_free_swap */
 #include <linux/ptrace.h>	/* user_enable_single_step */
 #include <linux/kdebug.h>	/* notifier mechanism */
-#include "../../mm/internal.h"	/* munlock_vma_page */
 #include <linux/percpu-rwsem.h>
 #include <linux/task_work.h>
 #include <linux/shmem_fs.h>
diff --git a/mm/internal.h b/mm/internal.h
index 0b74105ea363..ce462bf145b4 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -548,7 +548,6 @@ static inline void mlock_vma_folio(struct folio *folio,
 }
 
 void munlock_folio(struct folio *folio);
-
 static inline void munlock_vma_folio(struct folio *folio,
 		struct vm_area_struct *vma, bool compound)
 {
@@ -557,11 +556,6 @@ static inline void munlock_vma_folio(struct folio *folio,
 		munlock_folio(folio);
 }
 
-static inline void munlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound)
-{
-	munlock_vma_folio(page_folio(page), vma, compound);
-}
 void mlock_new_folio(struct folio *folio);
 bool need_mlock_drain(int cpu);
 void mlock_drain_local(void);
@@ -650,8 +644,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 }
 #else /* !CONFIG_MMU */
 static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void munlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_folio(struct folio *folio) { }
 static inline bool need_mlock_drain(int cpu) { return false; }
 static inline void mlock_drain_local(void) { }
diff --git a/mm/rmap.c b/mm/rmap.c
index 33e15181ae73..0b5abdda1e6b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1431,14 +1431,14 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * It would be tidy to reset PageAnon mapping when fully unmapped,
-	 * but that might overwrite a racing page_add_anon_rmap
-	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_pages_prepare,
-	 * and remember that it's only reliable while mapped.
+	 * It would be tidy to reset folio_test_anon mapping when fully
+	 * unmapped, but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping before us:
+	 * so leave the reset to free_pages_prepare, and remember that
+	 * it's only reliable while mapped.
 	 */
 
-	munlock_vma_page(page, vma, compound);
+	munlock_vma_folio(folio, vma, compound);
 }
 
 /*
-- 
cgit v1.2.3
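
For reference when reading the documentation hunk above: the helper that page_remove_rmap() now calls directly is the static inline munlock_vma_folio() kept in mm/internal.h (its declaration context is visible in the diff). A minimal sketch of its logic is below, assuming the behaviour described in unevictable-lru.rst; the exact body, including the folio_test_large() test and the unlikely() annotation, is reconstructed for illustration and is not part of this patch.

/*
 * Sketch (not taken from this patch): munlock the folio only when the VMA
 * is VM_LOCKED, and skip a PTE mapping of part of a large (transparent
 * huge) folio, matching the unevictable-lru.rst description above.
 */
static inline void munlock_vma_folio(struct folio *folio,
		struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		munlock_folio(folio);
}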