author		Li RongQing <lirongqing@baidu.com>	2024-04-10 11:01:14 +0800
committer	Ingo Molnar <mingo@kernel.org>		2024-04-10 06:55:31 +0200
commit		e0a9ac192fd62322b932c6018db60217b3ad866d
tree		6fba8d76466bdc8f1458460be3c6feb1df992524
parent		7911f145de5fecbee1d67f27f73bec12f0fbc472
x86/cpu: Take NUMA node into account when allocating per-CPU cpumasks
per-CPU cpumasks are dominantly accessed from their own local CPUs,
so allocate them node-local to improve performance.

[ mingo: Rewrote the changelog. ]

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20240410030114.6201-1-lirongqing@baidu.com
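For context, a minimal sketch of the allocation pattern this patch applies
(the per-CPU variable and callback names here are hypothetical stand-ins;
zalloc_cpumask_var_node() and cpu_to_node() are the real APIs used in the
diffs below):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/topology.h>	/* cpu_to_node() */

/* Hypothetical per-CPU mask, standing in for ipi_mask et al. */
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static int example_prepare_cpu(unsigned int cpu)
{
	/*
	 * Allocate the mask on @cpu's own NUMA node rather than on
	 * whichever node the thread doing the setup happens to run on:
	 * the mask is dominantly accessed from @cpu itself.
	 */
	if (!zalloc_cpumask_var_node(&per_cpu(example_mask, cpu), GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;

	return 0;
}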
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/apic/x2apic_cluster.c	3
-rw-r--r--	arch/x86/kernel/smpboot.c		13
2 files changed, 9 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 567dbd2fe4b6..afbb885ce290 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -183,7 +183,8 @@ static int x2apic_prepare_cpu(unsigned int cpu)
 	if (alloc_clustermask(cpu, cluster, cpu_to_node(cpu)) < 0)
 		return -ENOMEM;
 
-	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
+	if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL,
+				     cpu_to_node(cpu)))
 		return -ENOMEM;
 
 	return 0;
 }
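One caveat worth noting (not part of the patch itself): the node hint only
changes behavior when CONFIG_CPUMASK_OFFSTACK=y. Otherwise cpumask_var_t is
an embedded array, the *alloc_cpumask_var*() calls perform no allocation at
all, and zalloc_cpumask_var_node() merely zeroes the mask. Simplified from
<linux/cpumask.h>:

#ifdef CONFIG_CPUMASK_OFFSTACK
/* Real kmalloc_node()-backed allocation: the node hint takes effect. */
typedef struct cpumask *cpumask_var_t;
#else
/* Embedded storage: the _node variant just zeroes it, node is ignored. */
typedef struct cpumask cpumask_var_t[1];
#endif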
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 76bb65045c64..536dad144036 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1033,7 +1033,7 @@ static __init void disable_smp(void)
 
 void __init smp_prepare_cpus_common(void)
 {
-	unsigned int i;
+	unsigned int i, n;
 
 	/* Mark all except the boot CPU as hotpluggable */
 	for_each_possible_cpu(i) {
@@ -1042,11 +1042,12 @@ void __init smp_prepare_cpus_common(void)
 	}
 
 	for_each_possible_cpu(i) {
-		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
-		zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL);
+		n = cpu_to_node(i);
+		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, i), GFP_KERNEL, n);
+		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, i), GFP_KERNEL, n);
+		zalloc_cpumask_var_node(&per_cpu(cpu_die_map, i), GFP_KERNEL, n);
+		zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL, n);
+		zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL, n);
 	}
 
 	set_cpu_sibling_map(0);
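A note on the design choice in this hunk: the node lookup is hoisted into a
local variable so cpu_to_node() runs once per CPU rather than once per mask,
and each allocation line stays within the kernel's usual column limit. Using
cpu_to_node() for CPUs that are not yet online is safe here on the assumption
that the possible-CPU-to-node mapping is already populated during early boot,
before smp_prepare_cpus_common() runs.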