author      Mike Rapoport (IBM) <rppt@kernel.org>       2023-03-21 19:05:09 +0200
committer   Andrew Morton <akpm@linux-foundation.org>   2023-04-05 19:42:54 -0700
commit      de57807e6f267a658a046dbca44dc40fe806d60f (patch)
tree        a5d1576cafd48265fa836df0168e8ed1aa5a39f8 /mm/mm_init.c
parent      f2fc4b44ec2bb94c51c7ae1af9b1177d72705992 (diff)
init,mm: fold late call to page_ext_init() to page_alloc_init_late()
When deferred initialization of struct pages is enabled, page_ext_init() must be
called after all the deferred initialization is done, but there is no point in
keeping it as a separate call from kernel_init_freeable() right after
page_alloc_init_late().

Fold the call to page_ext_init() into page_alloc_init_late() and localize the
deferred_struct_pages variable.

Link: https://lkml.kernel.org/r/20230321170513.2401534-11-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Doug Berger <opendmb@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
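The diffstat below is limited to mm/mm_init.c, so the caller side of the fold is
not part of this view. As a rough sketch of that side (the exact surrounding
lines in init/main.c are assumed here, not taken from this commit), the separate
late call that used to follow page_alloc_init_late() in kernel_init_freeable()
simply goes away:

--- a/init/main.c (illustrative sketch, not part of this diff)
+++ b/init/main.c
 static noinline void __init kernel_init_freeable(void)
 {
        ...
        page_alloc_init_late();
-       /* Initialize page ext after all struct pages are initialized. */
-       page_ext_init();
        ...
 }

After the fold, page_alloc_init_late() performs the call itself, guarded by
deferred_struct_pages, as shown in the second hunk of the diff below.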
Diffstat (limited to 'mm/mm_init.c')
-rw-r--r--   mm/mm_init.c   6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 43f6d3ed24ef..ff70da11e797 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -225,7 +225,7 @@ static unsigned long nr_kernel_pages __initdata;
 static unsigned long nr_all_pages __initdata;
 static unsigned long dma_reserve __initdata;
 
-bool deferred_struct_pages __meminitdata;
+static bool deferred_struct_pages __meminitdata;
 
 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
 
@@ -2358,6 +2358,10 @@ void __init page_alloc_init_late(void)
 
 	for_each_populated_zone(zone)
 		set_zone_contiguous(zone);
+
+	/* Initialize page ext after all struct pages are initialized. */
+	if (deferred_struct_pages)
+		page_ext_init();
 }
 
 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES