author		Dave Hansen <dave.hansen@linux.intel.com>	2021-10-18 15:15:32 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-10-18 20:22:02 -1000
commit		76af6a054da4055305ddb28c5eb151b9ee4f74f9 (patch)
tree		e34d52b5a37b46a5bb8f7541e3976dd10efa5922 /mm
parent		295be91f7ef0027fca2f2e4788e99731aa931834 (diff)
mm/migrate: add CPU hotplug to demotion #ifdef
Once upon a time, the node demotion updates were driven solely by memory
hotplug events. But now, there are handlers for both CPU and memory
hotplug.

However, the #ifdef around the code checks only memory hotplug. A system
that has HOTPLUG_CPU=y but MEMORY_HOTPLUG=n would miss CPU hotplug
events.

Update the #ifdef around the common code. Add memory and CPU-specific
#ifdefs for their handlers. These memory/CPU #ifdefs avoid unused
function warnings when their Kconfig option is off.

[arnd@arndb.de: rework hotplug_memory_notifier() stub]
Link: https://lkml.kernel.org/r/20211013144029.2154629-1-arnd@kernel.org
Link: https://lkml.kernel.org/r/20210924161255.E5FE8F7E@davehans-spike.ostc.intel.com
Fixes: 884a6e5d1f93 ("mm/migrate: update node demotion order on hotplug events")
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
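As an illustration of the guard layout the patch moves to, the sketch below
is a minimal stand-alone user-space C program, not the kernel sources: the
printf bodies are stand-ins, and the CONFIG_* symbols are toggled with -D
on the compiler command line rather than by Kconfig. The point it shows is
that the shared demotion code and the CPU handlers now compile whenever
CONFIG_HOTPLUG_CPU is set, independent of CONFIG_MEMORY_HOTPLUG.

#include <stdio.h>

#if defined(CONFIG_HOTPLUG_CPU)
/*
 * Common code: recompute the node demotion order. Under the old
 * "#if defined(CONFIG_MEMORY_HOTPLUG)" guard, a HOTPLUG_CPU=y,
 * MEMORY_HOTPLUG=n build compiled all of this out, so CPU hotplug
 * events were silently ignored.
 */
static void set_migration_target_nodes(void)
{
	printf("recomputing node demotion targets\n");
}

/* The CPU handler sits under the same CONFIG_HOTPLUG_CPU guard,
 * so it never becomes an unused function. */
static int migration_online_cpu(unsigned int cpu)
{
	(void)cpu;
	set_migration_target_nodes();
	return 0;
}
#endif /* CONFIG_HOTPLUG_CPU */

int main(void)
{
#if defined(CONFIG_HOTPLUG_CPU)
	migration_online_cpu(0);
#else
	printf("CONFIG_HOTPLUG_CPU off: demotion updates compiled out\n");
#endif
	return 0;
}

Building once with "cc -DCONFIG_HOTPLUG_CPU demo.c" and once without shows
both configurations compiling cleanly, with no unused-function warnings in
either case, which is what the per-subsystem #ifdefs (and Arnd's reworked
hotplug_memory_notifier() stub on the memory side) achieve in the kernel.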
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	| 42
-rw-r--r--	mm/page_ext.c	|  4
-rw-r--r--	mm/slab.c	|  4
3 files changed, 24 insertions(+), 26 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index a6311e46f605..700112c0123d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
EXPORT_SYMBOL(migrate_vma_finalize);
#endif /* CONFIG_DEVICE_PRIVATE */
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
/* Disable reclaim-based migration. */
static void __disable_all_migrate_targets(void)
{
@@ -3209,25 +3209,6 @@ static void set_migration_target_nodes(void)
}
/*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs. That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
- set_migration_target_nodes();
- return 0;
-}
-
-/*
* This leaves migrate-on-reclaim transiently disabled between
* the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
* whether reclaim-based migration is enabled or not, which
@@ -3284,6 +3265,25 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
return notifier_from_errno(0);
}
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs. That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+ set_migration_target_nodes();
+ return 0;
+}
+
static int __init migrate_on_reclaim_init(void)
{
int ret;
@@ -3303,4 +3303,4 @@ static int __init migrate_on_reclaim_init(void)
return 0;
}
late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/mm/page_ext.c b/mm/page_ext.c
index dfb91653d359..2a52fd9ed464 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
total_usage += table_size;
return 0;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
+
static void free_page_ext(void *addr)
{
if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
return notifier_from_errno(ret);
}
-#endif
-
void __init page_ext_init(void)
{
unsigned long pfn;
diff --git a/mm/slab.c b/mm/slab.c
index d0f725637663..874b3f8fe80d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
return 0;
}
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
/*
* Drains freelist for a node on each slab cache, used for memory hot-remove.
* Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
out:
return notifier_from_errno(ret);
}
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
/*
* swap the static kmem_cache_node with kmalloced memory