From fc1878ec70ede56ee48f2d65525d4f7c6888b496 Mon Sep 17 00:00:00 2001
From: ZhangPeng
Date: Sat, 1 Jul 2023 11:28:53 +0800
Subject: mm: remove page_rmapping()

After converting the last user to folio_raw_mapping(), we can safely
remove the function.

Link: https://lkml.kernel.org/r/20230701032853.258697-3-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng
Reviewed-by: Sidhartha Kumar
Reviewed-by: Matthew Wilcox (Oracle)
Cc: Kefeng Wang
Cc: Nanyong Sun
Signed-off-by: Andrew Morton
---
 mm/util.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..5e9305189c3f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -734,12 +734,6 @@ void *vcalloc(size_t n, size_t size)
 }
 EXPORT_SYMBOL(vcalloc);
 
-/* Neutral page->mapping pointer to address_space or anon_vma or other */
-void *page_rmapping(struct page *page)
-{
-	return folio_raw_mapping(page_folio(page));
-}
-
 struct anon_vma *folio_anon_vma(struct folio *folio)
 {
 	unsigned long mapping = (unsigned long)folio->mapping;
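For context on the replacement API: folio_raw_mapping() (a static inline in
mm/internal.h at this point in the series) simply masks the PAGE_MAPPING_FLAGS
bits off folio->mapping. The sketch below shows what converting a
page_rmapping() caller looks like; example_rmapping() is a hypothetical
illustration, not a function from this patch.

/*
 * Hypothetical caller, illustrating the conversion this series performs.
 * page_folio() resolves the page to its containing folio, and
 * folio_raw_mapping() then returns the neutral mapping pointer
 * (address_space, anon_vma or other), exactly as page_rmapping() did.
 */
static void *example_rmapping(struct page *page)
{
	/* Before this series: return page_rmapping(page); */
	return folio_raw_mapping(page_folio(page));
}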
From 29d26f1215de14721188988a59b1426abb85b7be Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 2 Aug 2023 16:13:33 +0100
Subject: mm: remove ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO

Current best practice is to reuse the name of the function as a define
to indicate that the function is implemented by the architecture.

Link: https://lkml.kernel.org/r/20230802151406.3735276-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Acked-by: Mike Rapoport (IBM)
Reviewed-by: Anshuman Khandual
Signed-off-by: Andrew Morton
---
 Documentation/core-api/cachetlb.rst | 24 +++++++++---------------
 include/linux/cacheflush.h          |  4 ++--
 mm/util.c                           |  2 +-
 3 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/Documentation/core-api/cachetlb.rst b/Documentation/core-api/cachetlb.rst
index b645947954fb..889fc84ccd1b 100644
--- a/Documentation/core-api/cachetlb.rst
+++ b/Documentation/core-api/cachetlb.rst
@@ -273,7 +273,7 @@ maps this page at its virtual address.
 	If D-cache aliasing is not an issue, these two routines may
 	simply call memcpy/memset directly and do nothing more.
 
-  ``void flush_dcache_page(struct page *page)``
+  ``void flush_dcache_folio(struct folio *folio)``
 
 	This routine must be called when:
@@ -281,7 +281,7 @@ maps this page at its virtual address.
 	     and / or in high memory
 	  b) the kernel is about to read from a page cache page and user space
 	     shared/writable mappings of this page potentially exist.  Note
-	     that {get,pin}_user_pages{_fast} already call flush_dcache_page
+	     that {get,pin}_user_pages{_fast} already call flush_dcache_folio
 	     on any page found in the user address space and thus driver
 	     code rarely needs to take this into account.
@@ -295,7 +295,7 @@ maps this page at its virtual address.
 
 	The phrase "kernel writes to a page cache page" means, specifically,
 	that the kernel executes store instructions that dirty data in that
-	page at the page->virtual mapping of that page.  It is important to
+	page at the kernel virtual mapping of that page.  It is important to
 	flush here to handle D-cache aliasing, to make sure these kernel stores
 	are visible to user space mappings of that page.
@@ -306,18 +306,18 @@ maps this page at its virtual address.
 	If D-cache aliasing is not an issue, this routine may simply
 	be defined as a nop on that architecture.
 
-	There is a bit set aside in page->flags (PG_arch_1) as "architecture
+	There is a bit set aside in folio->flags (PG_arch_1) as "architecture
 	private".  The kernel guarantees that, for pagecache pages, it will
 	clear this bit when such a page first enters the pagecache.
 
 	This allows these interfaces to be implemented much more
 	efficiently.  It allows one to "defer" (perhaps indefinitely) the
 	actual flush if there are currently no user processes mapping this
-	page.  See sparc64's flush_dcache_page and update_mmu_cache_range
+	page.  See sparc64's flush_dcache_folio and update_mmu_cache_range
 	implementations for an example of how to go about doing this.
 
-	The idea is, first at flush_dcache_page() time, if
-	page_file_mapping() returns a mapping, and mapping_mapped on that
+	The idea is, first at flush_dcache_folio() time, if
+	folio_flush_mapping() returns a mapping, and mapping_mapped() on that
 	mapping returns %false, just mark the architecture private page
 	flag bit.  Later, in update_mmu_cache_range(), a check is made
 	of this flag bit, and if set the flush is done and the flag bit
@@ -331,12 +331,6 @@ maps this page at its virtual address.
 	dirty.  Again, see sparc64 for examples of how
 	to deal with this.
 
-  ``void flush_dcache_folio(struct folio *folio)``
-	This function is called under the same circumstances as
-	flush_dcache_page().  It allows the architecture to
-	optimise for flushing the entire folio of pages instead
-	of flushing one page at a time.
-
   ``void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
   unsigned long user_vaddr, void *dst, void *src, int len)``
   ``void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -357,7 +351,7 @@ maps this page at its virtual address.
 
 	When the kernel needs to access the contents of an anonymous
 	page, it calls this function (currently only
-	get_user_pages()).  Note: flush_dcache_page() deliberately
+	get_user_pages()).  Note: flush_dcache_folio() deliberately
 	doesn't work for an anonymous page.  The default
 	implementation is a nop (and should remain so for all coherent
 	architectures).  For incoherent architectures, it should flush
@@ -374,7 +368,7 @@ maps this page at its virtual address.
   ``void flush_icache_page(struct vm_area_struct *vma, struct page *page)``
 
 	All the functionality of flush_icache_page can be implemented in
-	flush_dcache_page and update_mmu_cache_range.  In the future, the hope
+	flush_dcache_folio and update_mmu_cache_range.  In the future, the hope
 	is to remove this interface completely.
 
 The final category of APIs is for I/O to deliberately aliased address

diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
index a6189d21f2ba..82136f3fcf54 100644
--- a/include/linux/cacheflush.h
+++ b/include/linux/cacheflush.h
@@ -7,14 +7,14 @@ struct folio;
 
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+#ifndef flush_dcache_folio
 void flush_dcache_folio(struct folio *folio);
 #endif
 #else
 static inline void flush_dcache_folio(struct folio *folio)
 {
 }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#define flush_dcache_folio flush_dcache_folio
 #endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
 
 #endif /* _LINUX_CACHEFLUSH_H */

diff --git a/mm/util.c b/mm/util.c
index 5e9305189c3f..cde229b05eb3 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1119,7 +1119,7 @@ void page_offline_end(void)
 }
 EXPORT_SYMBOL(page_offline_end);
 
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+#ifndef flush_dcache_folio
 void flush_dcache_folio(struct folio *folio)
 {
 	long i, nr = folio_nr_pages(folio);
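The convention adopted here is the kernel's usual "#define func func" idiom:
an architecture that implements flush_dcache_folio() defines a macro with the
function's own name, so the generic "#ifndef flush_dcache_folio" blocks in
linux/cacheflush.h and mm/util.c compile out. A rough sketch of an opting-in
architecture header follows, with a hypothetical arch name (no real
architecture is quoted here):

/* arch/example/include/asm/cacheflush.h -- hypothetical sketch */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);

/* Provided by the arch; suppresses the generic fallback declaration. */
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio

An architecture that leaves the macro undefined keeps the generic out-of-line
flush_dcache_folio() from mm/util.c (if it implements flush_dcache_page()) or
the inline nop from linux/cacheflush.h (if it does not).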
From 3d2c908768877714a354ee6d7bf93e801400d5e2 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Mon, 21 Aug 2023 18:08:48 +0200
Subject: mm/swap: inline folio_set_swap_entry() and folio_swap_entry()

Let's simply work on the folio directly and remove the helpers.

Link: https://lkml.kernel.org/r/20230821160849.531668-4-david@redhat.com
Signed-off-by: David Hildenbrand
Suggested-by: Matthew Wilcox
Reviewed-by: Chris Li
Cc: Catalin Marinas
Cc: Dan Streetman
Cc: Hugh Dickins
Cc: Peter Xu
Cc: Seth Jennings
Cc: Vitaly Wool
Cc: Will Deacon
Signed-off-by: Andrew Morton
---
 include/linux/swap.h | 12 +-----------
 mm/memory.c          |  2 +-
 mm/shmem.c           |  6 +++---
 mm/swap_state.c      |  7 +++----
 mm/swapfile.c        |  2 +-
 mm/util.c            |  2 +-
 mm/vmscan.c          |  2 +-
 mm/zswap.c           |  4 ++--
 8 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 352eca0a75bc..493487ed7c38 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -333,25 +333,15 @@ struct swap_info_struct {
 					   */
 };
 
-static inline swp_entry_t folio_swap_entry(struct folio *folio)
-{
-	return folio->swap;
-}
-
 static inline swp_entry_t page_swap_entry(struct page *page)
 {
 	struct folio *folio = page_folio(page);
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 
 	entry.val += folio_page_idx(folio, page);
 	return entry;
 }
 
-static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
-{
-	folio->swap = entry;
-}
-
 /* linux/mm/workingset.c */
 bool workingset_test_recent(void *shadow, bool file, bool *workingset);
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);

diff --git a/mm/memory.c b/mm/memory.c
index d104a38e8545..421fcef3a3e7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3828,7 +3828,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				folio_add_lru(folio);
 
 				/* To provide entry to swap_readpage() */
-				folio_set_swap_entry(folio, entry);
+				folio->swap = entry;
 				swap_readpage(page, true, NULL);
 				folio->private = NULL;
 			}

diff --git a/mm/shmem.c b/mm/shmem.c
index 99fb60ec2c3d..980289be5f63 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1642,7 +1642,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	int error;
 
 	old = *foliop;
-	entry = folio_swap_entry(old);
+	entry = old->swap;
 	swap_index = swp_offset(entry);
 	swap_mapping = swap_address_space(entry);
@@ -1663,7 +1663,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	__folio_set_locked(new);
 	__folio_set_swapbacked(new);
 	folio_mark_uptodate(new);
-	folio_set_swap_entry(new, entry);
+	new->swap = entry;
 	folio_set_swapcache(new);
 
 	/*
@@ -1785,7 +1785,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	/* We have to do this with folio locked to prevent races */
 	folio_lock(folio);
 	if (!folio_test_swapcache(folio) ||
-	    folio_swap_entry(folio).val != swap.val ||
+	    folio->swap.val != swap.val ||
 	    !shmem_confirm_swap(mapping, index, swap)) {
 		error = -EEXIST;
 		goto unlock;

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2f2417810052..b3b14bd0dd64 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -100,7 +100,7 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 
 	folio_ref_add(folio, nr);
 	folio_set_swapcache(folio);
-	folio_set_swap_entry(folio, entry);
+	folio->swap = entry;
 
 	do {
 		xas_lock_irq(&xas);
@@ -156,8 +156,7 @@ void __delete_from_swap_cache(struct folio *folio,
 		VM_BUG_ON_PAGE(entry != folio, entry);
 		xas_next(&xas);
 	}
-	entry.val = 0;
-	folio_set_swap_entry(folio, entry);
+	folio->swap.val = 0;
 	folio_clear_swapcache(folio);
 	address_space->nrpages -= nr;
 	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
@@ -233,7 +232,7 @@ fail:
  */
 void delete_from_swap_cache(struct folio *folio)
 {
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);

diff --git a/mm/swapfile.c b/mm/swapfile.c
index bd9d904671b9..e52f486834eb 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1536,7 +1536,7 @@ unlock_out:
 
 static bool folio_swapped(struct folio *folio)
 {
-	swp_entry_t entry = folio_swap_entry(folio);
+	swp_entry_t entry = folio->swap;
 	struct swap_info_struct *si = _swap_info_get(entry);
 
 	if (!si)

diff --git a/mm/util.c b/mm/util.c
index cde229b05eb3..f31e2ca62cfa 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -764,7 +764,7 @@ struct address_space *folio_mapping(struct folio *folio)
 		return NULL;
 
 	if (unlikely(folio_test_swapcache(folio)))
-		return swap_address_space(folio_swap_entry(folio));
+		return swap_address_space(folio->swap);
 
 	mapping = folio->mapping;
 	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7c149cb8d66..6f13394b112e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1423,7 +1423,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
 	}
 
 	if (folio_test_swapcache(folio)) {
-		swp_entry_t swap = folio_swap_entry(folio);
+		swp_entry_t swap = folio->swap;
 
 		if (reclaimed && !mapping_exiting(mapping))
 			shadow = workingset_eviction(folio, target_memcg);

diff --git a/mm/zswap.c b/mm/zswap.c
index 7300b98d4a03..412b1409a0d7 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1190,7 +1190,7 @@ static void zswap_fill_page(void *ptr, unsigned long value)
 
 bool zswap_store(struct folio *folio)
 {
-	swp_entry_t swp = folio_swap_entry(folio);
+	swp_entry_t swp = folio->swap;
 	int type = swp_type(swp);
 	pgoff_t offset = swp_offset(swp);
 	struct page *page = &folio->page;
@@ -1370,7 +1370,7 @@ shrink:
 
 bool zswap_load(struct folio *folio)
 {
-	swp_entry_t swp = folio_swap_entry(folio);
+	swp_entry_t swp = folio->swap;
 	int type = swp_type(swp);
 	pgoff_t offset = swp_offset(swp);
 	struct page *page = &folio->page;
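After this series, callers read and write folio->swap directly; the only
remaining helper is page_swap_entry(), which offsets the folio's entry by the
page's index within a large folio. A minimal sketch of the resulting idiom
follows; example_swap_usage() is hypothetical, not code from the patch.

/* Hypothetical illustration of the post-series idiom. */
static void example_swap_usage(struct folio *folio, swp_entry_t entry)
{
	/* Was: folio_set_swap_entry(folio, entry); */
	folio->swap = entry;

	/* Was: entry = folio_swap_entry(folio); */
	entry = folio->swap;

	/* Per-page entries in a large folio still go through a helper: */
	entry = page_swap_entry(folio_page(folio, 0));
}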