author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-17 09:35:51 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-17 09:35:51 -0800
commit    f66d6acccbc08b4146f4c2cf9445241f70f5517d (patch)
tree      39000388d92a0428a5953f0af1b8fd0f0ef6c355 /arch/x86/kernel
parent    4a5df37964673effcd9f84041f7423206a5ae5f2 (diff)
parent    8d9ffb2fe65a6c4ef114e8d4f947958a12751bbe (diff)
Merge tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Make sure a kdump kernel with CONFIG_IMA_KEXEC enabled and booted on
   an AMD SME enabled hardware properly decrypts the ima_kexec buffer
   information passed to it from the previous kernel

 - Fix building the kernel with Clang where a non-TLS definition of the
   stack protector guard cookie leads to bogus code generation

 - Clear a wrongly advertised virtualized VMLOAD/VMSAVE feature flag on
   some Zen4 client systems as those insns are not supported on client

* tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix a kdump kernel failure on SME system when CONFIG_IMA_KEXEC=y
  x86/stackprotector: Work around strict Clang TLS symbol requirements
  x86/CPU/AMD: Clear virtualized VMLOAD/VMSAVE on Zen4 client
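For context on the second fix: a hedged sketch, not the kernel's code, of what -fstack-protector conceptually makes the compiler emit around a function with an on-stack buffer (demo() and its body are hypothetical). The real guard load is a TLS/%gs-relative access, and a stray non-TLS definition of the cookie symbol is exactly what the Clang fix below untangles.

extern unsigned long __stack_chk_guard;		/* the guard cookie */
extern void __stack_chk_fail(void);		/* called on detected corruption */

void demo(const char *src)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: stash cookie */
	char buf[64];

	__builtin_strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';

	if (canary != __stack_chk_guard)	/* epilogue: recheck cookie */
		__stack_chk_fail();		/* an overflow clobbered it */
}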
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/amd.c       11
-rw-r--r--  arch/x86/kernel/cpu/common.c     2
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S    3
3 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fab5caec0b72..823f44f7bc94 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
 {
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+
+	/*
+	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
+	 * in some BIOS versions but they can lead to random host reboots.
+	 */
+	switch (c->x86_model) {
+	case 0x18 ... 0x1f:
+	case 0x60 ... 0x7f:
+		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
+		break;
+	}
 }
 
 static void init_amd_zen5(struct cpuinfo_x86 *c)
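An aside on the switch statement above: case 0x18 ... 0x1f: uses the GNU case-range extension (inclusive on both ends), which the kernel relies on throughout. A minimal standalone sketch with the same effect; the helper name is hypothetical:

/* Hypothetical helper mirroring the case ranges in the hunk above. */
static inline int zen4_model_is_affected(unsigned int model)
{
	switch (model) {
	case 0x18 ... 0x1f:	/* same as: model >= 0x18 && model <= 0x1f */
	case 0x60 ... 0x7f:
		return 1;
	default:
		return 0;
	}
}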
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a5f221ea5688..f43bb974fc66 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2089,8 +2089,10 @@ void syscall_init(void)
 
 #ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+#ifndef CONFIG_SMP
 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
 #endif
+#endif
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b8c5741d2fb4..feb8102a9ca7 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -491,6 +491,9 @@ SECTIONS
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
+/* needed for Clang - see arch/x86/entry/entry.S */
+PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+
 #ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load