author | Mark Rutland <mark.rutland@arm.com> | 2020-10-05 17:43:03 +0100 |
---|---|---|
committer | Will Deacon <will@kernel.org> | 2020-10-05 18:54:49 +0100 |
commit | 353e228eb355be5a65a3c0996c774a0f46737fda (patch) | |
tree | 1bd5619d6b4765e8dd452f4144e20e0bfe04dc94 /arch/arm64 | |
parent | 4dafc08d0ba4768e8540f49ab40c3ea26e40d554 (diff) | |
arm64: initialize per-cpu offsets earlier
The current initialization of the per-cpu offset register is difficult
to follow, and it does not always happen early enough for the upcoming
KCSAN instrumentation, whose callbacks use the per-cpu offset.
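
To make the dependency concrete, here is a minimal sketch of the shape of the problem. The hook name and counter are hypothetical; DEFINE_PER_CPU and this_cpu_inc() are the real per-cpu accessors:

```c
#include <linux/percpu.h>
#include <linux/types.h>

/* Hypothetical per-cpu counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, hook_count);

/*
 * Hypothetical KCSAN-style callback: the compiler emits calls like this
 * around instrumented memory accesses. this_cpu_inc() locates this CPU's
 * copy of hook_count via the per-cpu offset (TPIDR_EL1 on arm64), so if
 * the offset register hasn't been initialized yet, the access resolves
 * to a bogus address.
 */
void kcsan_like_hook(const volatile void *ptr, size_t size)
{
	this_cpu_inc(hook_count);
}
```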
To make it possible to support KCSAN, and to simplify reasoning about
early bringup code, let's initialize the per-cpu offset earlier, before
we run any C code that may consume it. To do so, this patch adds a new
init_this_cpu_offset() helper that's called before the usual
primary/secondary start functions. For consistency, this is also used to
re-initialize the per-cpu offset after the runtime per-cpu areas have
been allocated (which can change CPU0's offset).
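
For background, a sketch of the mechanism being (re)initialized, adapted from the arm64 headers of this era (exact details are version-dependent). The offset is held in a system register, and per_cpu_offset(cpu) simply indexes the __per_cpu_offset[] array that setup_per_cpu_areas() rewrites:

```c
#include <asm/alternative.h>

/*
 * Adapted sketch of arch/arm64/include/asm/percpu.h: the per-cpu offset
 * lives in TPIDR_EL1, or TPIDR_EL2 on VHE systems (selected at boot by
 * the alternatives framework), so (re)initializing it is a single
 * register write. That is why calling init_this_cpu_offset() a second
 * time, once __per_cpu_offset[0] points at the runtime area, is cheap.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     :: "r" (off) : "memory");
}
```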
So that init_this_cpu_offset() isn't itself subject to instrumentation
that might consume the per-cpu offset before it is set up, it is marked
noinstr.
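
As a rough illustration of the noinstr discipline (hypothetical function name; the attribute and the instrumentation_begin()/instrumentation_end() brackets are real kernel facilities of this era):

```c
#include <linux/compiler_types.h>
#include <linux/instrumentation.h>

/*
 * Hypothetical example: a noinstr body runs free of tracing and
 * sanitizer hooks, and any call out to normally-instrumented code must
 * be bracketed with instrumentation_begin()/instrumentation_end().
 * init_this_cpu_offset() needs no such bracket, since everything it
 * calls is itself safe to run without a valid per-cpu offset.
 */
void noinstr example_early_setup(void)
{
	/* raw work first, e.g. installing the per-cpu offset */

	instrumentation_begin();
	/* instrumented helpers would be permitted here */
	instrumentation_end();
}
```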
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201005164303.21389-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r-- | arch/arm64/include/asm/cpu.h | 2 |
-rw-r--r-- | arch/arm64/kernel/head.S | 3 |
-rw-r--r-- | arch/arm64/kernel/setup.c | 12 |
-rw-r--r-- | arch/arm64/kernel/smp.c | 13 |
4 files changed, 19 insertions, 11 deletions
```diff
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 7faae6ff3ab4..d9d60b18e811 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -68,4 +68,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 				 struct cpuinfo_arm64 *boot);
 
+void init_this_cpu_offset(void);
+
 #endif /* __ASM_CPU_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 037421c66b14..2720e6ec6814 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -452,6 +452,8 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
+	bl	init_this_cpu_offset
+
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
@@ -758,6 +760,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ptrauth_keys_init_cpu	x2, x3, x4, x5
 #endif
 
+	bl	init_this_cpu_offset
 	b	secondary_start_kernel
 SYM_FUNC_END(__secondary_switched)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..005171972764 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	set_cpu_logical_map(0, mpidr);
 
-	/*
-	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
-	 * using percpu variable early, for example, lockdep will
-	 * access percpu variable inside lock_release
-	 */
-	set_my_cpu_offset(0);
 	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
 		(unsigned long)mpidr, read_cpuid_id());
 }
@@ -282,6 +276,12 @@ u64 cpu_logical_map(int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_logical_map);
 
+void noinstr init_this_cpu_offset(void)
+{
+	unsigned int cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
+}
+
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
 	init_mm.start_code = (unsigned long) _text;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 355ee9eed4dd..7714310fba22 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -192,10 +192,7 @@ asmlinkage notrace void secondary_start_kernel(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
 	const struct cpu_operations *ops;
-	unsigned int cpu;
-
-	cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
+	unsigned int cpu = smp_processor_id();
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -435,7 +432,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+	/*
+	 * The runtime per-cpu areas have been allocated by
+	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+	 * freed shortly, so we must move over to the runtime per-cpu area.
+	 */
+	init_this_cpu_offset();
+
 	cpuinfo_store_boot_cpu();
 
 	/*
```