author		Pasha Tatashin <pasha.tatashin@soleen.com>	2023-01-17 20:46:17 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:22 -0800
commit		7ec7096b8577d3b899c1dae456a414f2d08c7ddb (patch)
tree		61fcd4e096bddc8dac5b223b8c5b14f4df7b0184
parent		64517d6e1291b5e942b00c53674ecf33f918313f (diff)
mm/page_ext: init page_ext early if there are no deferred struct pages
page_ext must be initialized after all struct pages are initialized. Therefore, page_ext is initialized after page_alloc_init_late(), and can optionally be initialized earlier via the early_page_ext kernel parameter, which as a side effect also disables deferred struct pages.

Allow page_ext to be initialized early automatically when there are no deferred struct pages, so that page_ext can be used during kernel boot, for example to track page allocations early.

[pasha.tatashin@soleen.com: fix build with CONFIG_PAGE_EXTENSION=n]
Link: https://lkml.kernel.org/r/20230118155251.2522985-1-pasha.tatashin@soleen.com
Link: https://lkml.kernel.org/r/20230117204617.1553748-1-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Charan Teja Kalla <quic_charante@quicinc.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Li Zhe <lizhe.67@bytedance.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	include/linux/page_ext.h	2
-rw-r--r--	init/main.c	6
-rw-r--r--	mm/page_alloc.c	6
-rw-r--r--	mm/page_ext.c	2
4 files changed, 11 insertions(+), 5 deletions(-)
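Taken together, the init/main.c hunks below move the decision from "was early_page_ext passed?" to "were any struct pages actually deferred?". A minimal sketch of the resulting ordering, condensed from the two call sites changed in this patch (not a literal copy of either file):

bool deferred_struct_pages;	/* set by memmap_init_range() once defer_init() fires */

static void __init mm_init(void)
{
	/* ... */
	vmalloc_init();
	/* vmap is ready; if nothing was deferred, page_ext can be set up now */
	if (!deferred_struct_pages)
		page_ext_init();
	/* ... */
}

static noinline void __init kernel_init_freeable(void)
{
	/* ... */
	page_alloc_init_late();	/* completes deferred struct page initialization */
	/* only in the deferred case is page_ext init postponed until here */
	if (deferred_struct_pages)
		page_ext_init();
	/* ... */
}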
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 67314f648aeb..bc2e39090a1f 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -29,6 +29,8 @@ struct page_ext_operations {
bool need_shared_flags;
};
+extern bool deferred_struct_pages;
+
#ifdef CONFIG_PAGE_EXTENSION
/*
diff --git a/init/main.c b/init/main.c
index e1c3911d7c70..64cd2ff051c4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -855,8 +855,8 @@ static void __init mm_init(void)
pgtable_init();
debug_objects_mem_init();
vmalloc_init();
- /* Should be run after vmap initialization */
- if (early_page_ext_enabled())
+ /* If no deferred init page_ext now, as vmap is fully initialized */
+ if (!deferred_struct_pages)
page_ext_init();
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
@@ -1628,7 +1628,7 @@ static noinline void __init kernel_init_freeable(void)
padata_init();
page_alloc_init_late();
/* Initialize page ext after all struct pages are initialized. */
- if (!early_page_ext_enabled())
+ if (deferred_struct_pages)
page_ext_init();
do_basic_setup();
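With page_ext ready this early on systems that do not defer struct page init, boot-time code can already look up per-page extension data. A hypothetical illustration, not part of this patch, assuming the page_ext_get()/page_ext_put() helpers from mm/page_ext.c:

/* Hypothetical boot-time hook, for illustration only. */
static void __init inspect_boot_page(struct page *page)
{
	struct page_ext *page_ext;

	if (deferred_struct_pages)
		return;				/* page_ext is not initialized yet */

	page_ext = page_ext_get(page);		/* takes rcu_read_lock() internally */
	if (!page_ext)
		return;				/* page_ext disabled or unavailable */

	/* ... read or update per-page extension data here ... */

	page_ext_put(page_ext);			/* drops the RCU read lock */
}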
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0cfad30fb44c..ecb9e9acfe7f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -430,6 +430,8 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
+bool deferred_struct_pages __meminitdata;
+
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
* During boot we initialize deferred pages on-demand, as needed, but once
@@ -6803,8 +6805,10 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
if (context == MEMINIT_EARLY) {
if (overlap_memmap_init(zone, &pfn))
continue;
- if (defer_init(nid, pfn, zone_end_pfn))
+ if (defer_init(nid, pfn, zone_end_pfn)) {
+ deferred_struct_pages = true;
break;
+ }
}
page = pfn_to_page(pfn);
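When CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, defer_init() is a stub that never fires, so deferred_struct_pages stays false and page_ext is always initialized from mm_init(). Roughly (the stub as it exists outside this diff, shown only for context):

#ifndef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;	/* nothing is ever deferred without the config option */
}
#endif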
diff --git a/mm/page_ext.c b/mm/page_ext.c
index e2c22ffdbb81..dc1626be458b 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -92,7 +92,7 @@ unsigned long page_ext_size;
static unsigned long total_usage;
static struct page_ext *lookup_page_ext(const struct page *page);
-bool early_page_ext;
+bool early_page_ext __meminitdata;
static int __init setup_early_page_ext(char *str)
{
early_page_ext = true;
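The early_page_ext parameter keeps its old effect through the new flag: when it is set, the deferred-init heuristic in mm/page_alloc.c bails out, no struct pages are deferred, deferred_struct_pages stays false, and mm_init() initializes page_ext right after vmalloc_init(). A sketch of that check as it exists outside this diff (assumed context, not part of this patch):

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	if (early_page_ext_enabled())
		return false;	/* early_page_ext on the command line disables deferral */
	/* ... remaining deferred-init heuristics elided; they may also return false ... */
	return true;
}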