author		Matthew Wilcox (Oracle) <willy@infradead.org>	2024-10-05 21:01:12 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2024-11-07 14:38:07 -0800
commit		f7470591f8db1a72ce9f7ab49cb13c2b21b92002
tree		74216b10aff78c9bcd324f438b9b911f3a9d1a69
parent		e664c2cd98cbf5e6fcc3ee92b5f3fc8f7f35e11e
mm: convert page_to_pgoff() to page_pgoff()
Patch series "page->index removals in mm", v2.
As part of shrinking struct page, we need to stop using page->index.  This
patchset gets rid of most of the remaining references to page->index in
mm and increases the number of functions which take a const folio/page
pointer.  It shrinks the text segment of mm by a few hundred bytes in my
test config, probably mostly from removing calls to compound_head() in
page_to_pgoff().
This patch (of 7):
Change the function signature to pass in the folio, as all three callers
already have it.  This removes a reference to page->index, which we're
trying to get rid of.  Also add kernel-doc.
Link: https://lkml.kernel.org/r/20241005200121.3231142-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20241005200121.3231142-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
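
For readers following the conversion, here is a minimal, hypothetical
caller-side sketch of what the change means in practice.  It is not part
of the patch; page_folio() is the existing helper for going from a page
to its containing folio, and the variable names are illustrative only:

	/* Before this patch: */
	pgoff = page_to_pgoff(page);		/* had to use compound_head() for tail pages */

	/* After this patch: */
	folio = page_folio(page);		/* or reuse a folio the caller already holds */
	pgoff = page_pgoff(folio, page);	/* folio->index + folio_page_idx(folio, page) */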
-rw-r--r--   include/linux/mm.h       |  2
-rw-r--r--   include/linux/pagemap.h  | 31
-rw-r--r--   mm/memory-failure.c      |  4
-rw-r--r--   mm/rmap.c                |  2
4 files changed, 21 insertions, 18 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 08e487e5850a..78848fbefe94 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1895,7 +1895,7 @@ static inline unsigned long page_to_section(const struct page *page)
  *
  * Return: The Page Frame Number of the first page in the folio.
  */
-static inline unsigned long folio_pfn(struct folio *folio)
+static inline unsigned long folio_pfn(const struct folio *folio)
 {
 	return page_to_pfn(&folio->page);
 }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 68a5f1ff3301..bcf0865a38ae 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1011,22 +1011,25 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
 	return read_cache_folio(mapping, index, NULL, file);
 }
 
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE.  For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE.  This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
  */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+		const struct page *page)
 {
-	struct page *head;
-
-	if (likely(!PageTransTail(page)))
-		return page->index;
-
-	head = compound_head(page);
-	/*
-	 * We don't initialize ->index for tail pages: calculate based on
-	 * head page
-	 */
-	return head->index + page - head;
+	return folio->index + folio_page_idx(folio, page);
 }
 
 /*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 96ce31e5a203..58a3d80961a4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -617,7 +617,7 @@ static void collect_procs_anon(struct folio *folio, struct page *page,
 	if (av == NULL)	/* Not actually mapped anymore */
 		return;
 
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	rcu_read_lock();
 	for_each_process(tsk) {
 		struct vm_area_struct *vma;
@@ -653,7 +653,7 @@ static void collect_procs_file(struct folio *folio, struct page *page,
 
 	i_mmap_lock_read(mapping);
 	rcu_read_lock();
-	pgoff = page_to_pgoff(page);
+	pgoff = page_pgoff(folio, page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
 		unsigned long addr;
diff --git a/mm/rmap.c b/mm/rmap.c
index 4d75433330f9..77c6b27cb0ee 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1276,7 +1276,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page,
 	 */
 	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
 			folio);
-	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
+	VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address),
 		       page);
 }
 
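
To make the new calculation concrete, here is a quick worked example with
made-up values (it is not part of the patch): a folio cached at file
offset 64, asked about its fourth page.

	/*
	 * Illustrative only: folio->index == 64 and
	 * folio_page_idx(folio, page) == 3, so:
	 */
	pgoff = page_pgoff(folio, page);	/* 64 + 3 == 67, i.e. file offset 67 * PAGE_SIZE */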