| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2024-04-03 18:18:30 +0100 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-04-25 20:56:32 -0700 |
| commit | 4746f5ce0fa52e21b5fe432970fe9516d1a45ebc | |
| tree | ea1b91650857ed9df0c3957277e79460387a513b /mm/khugepaged.c | |
| parent | e1e13262f0d6e58a6439d437c386bb3392aaca0b | |
khugepaged: inline hpage_collapse_alloc_folio()
Patch series "khugepaged folio conversions".
We've been hacking piecemeal at converting khugepaged to use folios instead of compound pages, so this patchset is a little larger than it should be as I undo some of our earlier wrong moves. In particular, collapse_file() now consistently uses 'new_folio' for the freshly allocated folio and 'folio' for the one that's currently in use.
This patch (of 7):
This function has only one caller, and the combined function is simpler to read, reason about, and modify.
Link: https://lkml.kernel.org/r/20240403171838.1445826-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240403171838.1445826-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r-- | mm/khugepaged.c | 19 ++++---------------
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 38830174608f..ad16dd8b26a8 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -891,20 +891,6 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
 }
 #endif
 
-static bool hpage_collapse_alloc_folio(struct folio **folio, gfp_t gfp, int node,
-                                       nodemask_t *nmask)
-{
-        *folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, nmask);
-
-        if (unlikely(!*folio)) {
-                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-                return false;
-        }
-
-        count_vm_event(THP_COLLAPSE_ALLOC);
-        return true;
-}
-
 /*
  * If mmap_lock temporarily dropped, revalidate vma
  * before taking mmap_lock.
@@ -1067,11 +1053,14 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
         int node = hpage_collapse_find_target_node(cc);
         struct folio *folio;
 
-        if (!hpage_collapse_alloc_folio(&folio, gfp, node, &cc->alloc_nmask)) {
+        folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
+        if (!folio) {
                 *hpage = NULL;
+                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
         }
 
+        count_vm_event(THP_COLLAPSE_ALLOC);
         if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                 folio_put(folio);
                 *hpage = NULL;
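For readability, here is how the allocation path of alloc_charge_hpage() reads once this patch is applied. This is a sketch reconstructed only from the context and added lines of the second hunk above; everything outside the hunk, including the rest of the function, is elided.

```c
/*
 * Allocation path of alloc_charge_hpage() after this patch, reconstructed
 * from the hunk above; code outside the hunk is elided.
 */
        int node = hpage_collapse_find_target_node(cc);
        struct folio *folio;

        /* The helper's allocation and its vmstat accounting are now inline. */
        folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
        if (!folio) {
                *hpage = NULL;
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
                return SCAN_ALLOC_HUGE_PAGE_FAIL;
        }

        count_vm_event(THP_COLLAPSE_ALLOC);
        if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
                folio_put(folio);
                *hpage = NULL;
                /* ... the hunk ends here; the charge-failure path is unchanged ... */
        }
```

Folding the helper in lets the failure path clear *hpage and bump THP_COLLAPSE_ALLOC_FAILED at a single site, and drops the old helper's bool-return-plus-out-parameter calling convention.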