Diffstat (limited to 'arch/arm64/kernel')
 arch/arm64/kernel/Makefile              |  2
 arch/arm64/kernel/acpi.c                | 30
 arch/arm64/kernel/armv8_deprecated.c    | 14
 arch/arm64/kernel/cpufeature.c          | 48
 arch/arm64/kernel/entry-common.c        | 52
 arch/arm64/kernel/entry.S               |  6
 arch/arm64/kernel/hyp-stub.S            |  1
 arch/arm64/kernel/image-vars.h          |  4
 arch/arm64/kernel/machine_kexec.c       |  3
 arch/arm64/kernel/machine_kexec_file.c  |  4
 arch/arm64/kernel/mpam.c                | 62
 arch/arm64/kernel/mte.c                 | 10
 arch/arm64/kernel/pi/patch-scs.c        |  8
 arch/arm64/kernel/process.c             | 32
 arch/arm64/kernel/relocate_kernel.S     |  3
 arch/arm64/kernel/rsi.c                 |  5
 arch/arm64/kernel/static_call.c         | 23
 arch/arm64/kernel/sys32.c               |  2
 arch/arm64/kernel/sys_compat.c          |  2
 arch/arm64/kernel/syscall.c             | 11
 arch/arm64/kernel/vdso32/Makefile       |  3
 arch/arm64/kernel/vmlinux.lds.S         | 19
22 files changed, 269 insertions(+), 75 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 76f32e424065..74b76bb70452 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_MODULES) += module.o module-plts.o
 obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
 obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
 obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
@@ -67,6 +68,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
 obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
 obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
+obj-$(CONFIG_ARM64_MPAM) += mpam.o
 obj-$(CONFIG_ARM64_MTE) += mte.o
 obj-y += vdso-wrap.o
 obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index a9d884fd1d00..5891f92c2035 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -458,3 +458,33 @@ int acpi_unmap_cpu(int cpu)
 }
 EXPORT_SYMBOL(acpi_unmap_cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+int acpi_get_cpu_uid(unsigned int cpu, u32 *uid)
+{
+	struct acpi_madt_generic_interrupt *gicc;
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	gicc = acpi_cpu_get_madt_gicc(cpu);
+	if (!gicc)
+		return -ENODEV;
+
+	*uid = gicc->uid;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_get_cpu_uid);
+
+int get_cpu_for_acpi_id(u32 uid)
+{
+	u32 cpu_uid;
+	int ret;
+
+	for (int cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		ret = acpi_get_cpu_uid(cpu, &cpu_uid);
+		if (ret == 0 && uid == cpu_uid)
+			return cpu;
+	}
+
+	return -EINVAL;
+}
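The two helpers above form a bidirectional mapping between logical CPU numbers and the ACPI processor UIDs found in MADT GICC entries. A minimal caller sketch (the UID value 57 and the pr_info() are purely illustrative, not part of this diff):

	u32 uid;
	int cpu = get_cpu_for_acpi_id(57);	/* 57: hypothetical UID from a firmware table */

	if (cpu >= 0 && !acpi_get_cpu_uid(cpu, &uid))
		pr_info("ACPI UID %u is CPU %d\n", uid, cpu);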
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index e737c6295ec7..b7a1f8b788bb 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -610,6 +610,20 @@ static int __init armv8_deprecated_init(void)
 	}
 #endif
 
+#ifdef CONFIG_SWP_EMULATION
+	/*
+	 * The purpose of supporting LSUI is to eliminate PAN toggling. CPUs
+	 * that support LSUI are unlikely to support a 32-bit runtime. Rather
+	 * than emulating the SWP instruction using LSUI instructions, simply
+	 * disable SWP emulation.
+	 */
+	if (cpus_have_final_cap(ARM64_HAS_LSUI)) {
+		insn_swp.status = INSN_UNAVAILABLE;
+		pr_info("swp/swpb instruction emulation is not supported on this system\n");
+	}
+#endif
+
 	for (int i = 0; i < ARRAY_SIZE(insn_emulations); i++) {
 		struct insn_emulation *ie = insn_emulations[i];
 
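For context: the emulation being disabled implements SWP as a load/store-exclusive loop with PAN toggled off around the user access, which is exactly the toggling FEAT_LSUI exists to remove. A simplified sketch of that shape, assuming the mainline uaccess_enable_privileged()/uaccess_disable_privileged() helpers and omitting the exception-table fault handling the real emulation has:

	static u32 user_swp(u32 __user *uaddr, u32 newval)
	{
		u32 oldval, fail;

		uaccess_enable_privileged();		/* clear PSTATE.PAN */
		asm volatile(
		"1:	ldxr	%w0, %2\n"
		"	stxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		: "=&r" (oldval), "=&r" (fail), "+Q" (*uaddr)
		: "r" (newval)
		: "memory");
		uaccess_disable_privileged();		/* set PSTATE.PAN */

		return oldval;
	}

With FEAT_LSUI the same access could use unprivileged instruction forms with PAN left set, but as the comment above notes, such CPUs are unlikely to run a 32-bit userspace at all.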
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 32c2dbcc0c64..6d53bb15cf7b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -77,6 +77,7 @@
 #include <linux/percpu.h>
 #include <linux/sched/isolation.h>
 
+#include <asm/arm_pmuv3.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -86,6 +87,7 @@
 #include <asm/kvm_host.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
+#include <asm/mpam.h>
 #include <asm/mte.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
@@ -281,6 +283,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64isar3[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FPRCVT_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSUI_SHIFT, 4, ID_AA64ISAR3_EL1_LSUI_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_LSFE_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0),
 	ARM64_FTR_END,
@@ -325,6 +328,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_GCIE_SHIFT, 4, ID_AA64PFR2_EL1_GCIE_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTEFAR_SHIFT, 4, ID_AA64PFR2_EL1_MTEFAR_NI),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTESTOREONLY_SHIFT, 4, ID_AA64PFR2_EL1_MTESTOREONLY_NI),
 	ARM64_FTR_END,
@@ -565,7 +569,7 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	 * We can instantiate multiple PMU instances with different levels
 	 * of support.
 	 */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
@@ -709,7 +713,7 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = {
 
 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
 	/* [31:28] TraceFilt */
-	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0),
@@ -1927,19 +1931,10 @@ static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
 	u64 dfr0 = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
 	unsigned int pmuver;
 
-	/*
-	 * PMUVer follows the standard ID scheme for an unsigned field with the
-	 * exception of 0xF (IMP_DEF) which is treated specially and implies
-	 * FEAT_PMUv3 is not implemented.
-	 *
-	 * See DDI0487L.a D24.1.3.2 for more details.
-	 */
 	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
 						      ID_AA64DFR0_EL1_PMUVer_SHIFT);
-	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-		return false;
 
-	return pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP;
+	return pmuv3_implemented(pmuver);
 }
 #endif
 
@@ -2501,13 +2496,19 @@ test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
 
 static void cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
 {
-	/*
-	 * Access by the kernel (at EL1) should use the reserved PARTID
-	 * which is configured unrestricted. This avoids priority-inversion
-	 * where latency sensitive tasks have to wait for a task that has
-	 * been throttled to release the lock.
-	 */
-	write_sysreg_s(0, SYS_MPAM1_EL1);
+	int cpu = smp_processor_id();
+	u64 regval = 0;
+
+	if (IS_ENABLED(CONFIG_ARM64_MPAM) && static_branch_likely(&mpam_enabled))
+		regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
+
+	write_sysreg_s(regval | MPAM1_EL1_MPAMEN, SYS_MPAM1_EL1);
+	if (cpus_have_cap(ARM64_SME))
+		write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
+	isb();
+
+	/* Synchronising the EL0 write is left until the ERET to EL0 */
+	write_sysreg_s(regval, SYS_MPAM0_EL1);
 }
 
 static bool
@@ -3178,6 +3179,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_ls64_v,
 		ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
 	},
+#ifdef CONFIG_ARM64_LSUI
+	{
+		.desc = "Unprivileged Load Store Instructions (LSUI)",
+		.capability = ARM64_HAS_LSUI,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		ARM64_CPUID_FIELDS(ID_AA64ISAR3_EL1, LSUI, IMP)
+	},
+#endif
 	{},
 };
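has_pmuv3() now shares the PMUVer interpretation with the PMU driver via pmuv3_implemented() from the newly included <asm/arm_pmuv3.h>, which encodes the rule the deleted comment described: PMUVer is an unsigned ID field except that 0xF (IMP_DEF) implies FEAT_PMUv3 is not implemented. The helper's shape is roughly the following (a sketch of the shared header, not part of this diff):

	static inline bool pmuv3_implemented(int pmuver)
	{
		return !(pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
			 pmuver == ID_AA64DFR0_EL1_PMUVer_NI);
	}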
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3625797e9ee8..f42ce7b5c67f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -35,11 +35,11 @@
  * Before this function is called it is not safe to call regular kernel code,
  * instrumentable code, or any code which may trigger an exception.
  */
-static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
+static noinstr irqentry_state_t arm64_enter_from_kernel_mode(struct pt_regs *regs)
 {
 	irqentry_state_t state;
 
-	state = irqentry_enter(regs);
+	state = irqentry_enter_from_kernel_mode(regs);
 
 	mte_check_tfsr_entry();
 	mte_disable_tco_entry(current);
@@ -51,11 +51,14 @@ static noinstr irqentry_state_t enter_from_kernel_mode(struct pt_regs *regs)
  * After this function returns it is not safe to call regular kernel code,
  * instrumentable code, or any code which may trigger an exception.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
-					irqentry_state_t state)
+static void noinstr arm64_exit_to_kernel_mode(struct pt_regs *regs,
+					      irqentry_state_t state)
 {
+	local_irq_disable();
+	irqentry_exit_to_kernel_mode_preempt(regs, state);
+	local_daif_mask();
 	mte_check_tfsr_exit();
-	irqentry_exit(regs, state);
+	irqentry_exit_to_kernel_mode_after_preempt(regs, state);
 }
 
 /*
@@ -298,11 +301,10 @@ static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 	unsigned long far = read_sysreg(far_el1);
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_mem_abort(far, esr, regs);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
@@ -310,55 +312,50 @@ static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
 	unsigned long far = read_sysreg(far_el1);
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_sp_pc_abort(far, esr, regs);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_el1_undef(regs, esr);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_el1_bti(regs, esr);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_gcs(struct pt_regs *regs, unsigned long esr)
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_el1_gcs(regs, esr);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_mops(struct pt_regs *regs, unsigned long esr)
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_el1_mops(regs, esr);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_breakpt(struct pt_regs *regs, unsigned long esr)
@@ -420,11 +417,10 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 	local_daif_inherit(regs);
 	do_el1_fpac(regs, esr);
-	local_daif_mask();
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
@@ -491,13 +487,13 @@ static __always_inline void __el1_irq(struct pt_regs *regs,
 {
 	irqentry_state_t state;
 
-	state = enter_from_kernel_mode(regs);
+	state = arm64_enter_from_kernel_mode(regs);
 
 	irq_enter_rcu();
 	do_interrupt_handler(regs, handler);
 	irq_exit_rcu();
 
-	exit_to_kernel_mode(regs, state);
+	arm64_exit_to_kernel_mode(regs, state);
 }
 
 static void noinstr el1_interrupt(struct pt_regs *regs, void (*handler)(struct pt_regs *))
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f8018b5c1f9a..e0db14e9c843 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -273,7 +273,7 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
 alternative_else_nop_endif
 1:
 
-	scs_load_current
+	scs_load_current_base
 	.else
 	add	x21, sp, #PT_REGS_SIZE
 	get_current_task tsk
@@ -378,8 +378,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-	scs_save tsk
-
 	/* Ignore asynchronous tag check faults in the uaccess routines */
 	ldr	x0, [tsk, THREAD_SCTLR_USER]
 	clear_mte_async_tcf x0
@@ -473,7 +471,7 @@ alternative_else_nop_endif
  */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
 	mrs	x21, ttbr0_el1
-	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
+	tst	x21, #TTBRx_EL1_ASID_MASK	// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 085bc9972f6b..634ddc904244 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -103,7 +103,6 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	// Engage the VHE magic!
 	mov_q	x0, HCR_HOST_VHE_FLAGS
 	msr_hcr_el2 x0
-	isb
 
 	// Use the EL1 allocated stack, per-cpu offset
 	mrs	x0, sp_el1
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index d7b0d12b1015..d4c7d45ae6bc 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -138,6 +138,10 @@ KVM_NVHE_ALIAS(__hyp_data_start);
 KVM_NVHE_ALIAS(__hyp_data_end);
 KVM_NVHE_ALIAS(__hyp_rodata_start);
 KVM_NVHE_ALIAS(__hyp_rodata_end);
+#ifdef CONFIG_NVHE_EL2_TRACING
+KVM_NVHE_ALIAS(__hyp_event_ids_start);
+KVM_NVHE_ALIAS(__hyp_event_ids_end);
+#endif
 
 /* pKVM static key */
 KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 239c16e3d02f..c5693a32e49b 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -129,9 +129,6 @@ int machine_kexec_post_load(struct kimage *kimage)
 	}
 
 	/* Create a copy of the linear map */
-	trans_pgd = kexec_page_alloc(kimage);
-	if (!trans_pgd)
-		return -ENOMEM;
 	rc = trans_pgd_create_copy(&info, &trans_pgd, PAGE_OFFSET, PAGE_END);
 	if (rc)
 		return rc;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index fba260ad87a9..e31fabed378a 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -134,6 +134,10 @@ int load_other_segments(struct kimage *image,
 
 		kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
 			      image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+
+		ret = crash_load_dm_crypt_keys(image);
+		if (ret)
+			goto out_err;
 	}
 #endif
diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c
new file mode 100644
index 000000000000..3a490de4fa12
--- /dev/null
+++ b/arch/arm64/kernel/mpam.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Arm Ltd.
+ */
+
+#include <asm/mpam.h>
+
+#include <linux/arm_mpam.h>
+#include <linux/cpu_pm.h>
+#include <linux/jump_label.h>
+#include <linux/percpu.h>
+
+DEFINE_STATIC_KEY_FALSE(mpam_enabled);
+DEFINE_PER_CPU(u64, arm64_mpam_default);
+DEFINE_PER_CPU(u64, arm64_mpam_current);
+
+u64 arm64_mpam_global_default;
+
+static int mpam_pm_notifier(struct notifier_block *self,
+			    unsigned long cmd, void *v)
+{
+	u64 regval;
+	int cpu = smp_processor_id();
+
+	switch (cmd) {
+	case CPU_PM_EXIT:
+		/*
+		 * Don't use mpam_thread_switch() as the system register
+		 * value has changed under our feet.
+		 */
+		regval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
+		write_sysreg_s(regval | MPAM1_EL1_MPAMEN, SYS_MPAM1_EL1);
+		if (system_supports_sme()) {
+			write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
+		}
+		isb();
+
+		write_sysreg_s(regval, SYS_MPAM0_EL1);
+
+		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
+	}
+}
+
+static struct notifier_block mpam_pm_nb = {
+	.notifier_call = mpam_pm_notifier,
+};
+
+static int __init arm64_mpam_register_cpus(void)
+{
+	u64 mpamidr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
+	u16 partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, mpamidr);
+	u8 pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, mpamidr);
+
+	if (!system_supports_mpam())
+		return 0;
+
+	cpu_pm_register_notifier(&mpam_pm_nb);
+	return mpam_register_requestor(partid_max, pmg_max);
+}
+/* Must occur before mpam_msc_driver_init() from subsys_initcall() */
+arch_initcall(arm64_mpam_register_cpus)
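The per-CPU arm64_mpam_current value maintained here is what cpu_enable_mpam() above and the mpam_thread_switch() call added to __switch_to() (see the process.c hunk below) consume. This diff does not show <asm/mpam.h>; the following is only a sketch of what such a task-switch hook can look like, with a hypothetical per-task thread.mpam_partid field standing in for wherever the task's PARTID/PMG value really lives:

	static inline void mpam_thread_switch(struct task_struct *tsk)
	{
		int cpu = smp_processor_id();
		u64 regval;

		if (!IS_ENABLED(CONFIG_ARM64_MPAM) ||
		    !static_branch_likely(&mpam_enabled))
			return;

		regval = READ_ONCE(tsk->thread.mpam_partid);	/* hypothetical field */
		if (regval == READ_ONCE(per_cpu(arm64_mpam_current, cpu)))
			return;

		WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval);
		/* synchronised by the eventual ERET to EL0 */
		write_sysreg_s(regval, SYS_MPAM0_EL1);
	}

Only MPAM0_EL1 needs rewriting per task: the EL1 value is per-CPU and is reloaded by the PM notifier when a core returns from a power-down, since the register contents have changed under our feet.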
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 32148bf09c1d..6874b16d0657 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -291,6 +291,9 @@ void mte_thread_switch(struct task_struct *next)
 	/* TCO may not have been disabled on exception entry for the current task. */
 	mte_disable_tco_entry(next);
 
+	if (!system_uses_mte_async_or_asymm_mode())
+		return;
+
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
@@ -315,8 +318,8 @@ void mte_cpu_setup(void)
 	 * CnP is not a boot feature so MTE gets enabled before CnP, but let's
 	 * make sure that is the case.
 	 */
-	BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
-	BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
+	BUG_ON(read_sysreg(ttbr0_el1) & TTBRx_EL1_CnP);
+	BUG_ON(read_sysreg(ttbr1_el1) & TTBRx_EL1_CnP);
 
 	/* Normal Tagged memory type at the corresponding MAIR index */
 	sysreg_clear_set(mair_el1,
@@ -350,6 +353,9 @@ void mte_suspend_enter(void)
 	if (!system_supports_mte())
 		return;
 
+	if (!system_uses_mte_async_or_asymm_mode())
+		return;
+
 	/*
 	 * The barriers are required to guarantee that the indirect writes
 	 * to TFSR_EL1 are synchronized before we report the state.
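Both new early returns test system_uses_mte_async_or_asymm_mode(), which in mainline is a static-key check, so the common synchronous-MTE (or MTE-less) case costs a patched branch rather than a TFSR read on every context switch and suspend. Roughly, as a sketch of the <asm/mte.h> helper rather than part of this diff:

	DECLARE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);

	static inline bool system_uses_mte_async_or_asymm_mode(void)
	{
		return static_branch_unlikely(&mte_async_or_asymm_mode);
	}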
diff --git a/arch/arm64/kernel/pi/patch-scs.c b/arch/arm64/kernel/pi/patch-scs.c
index bbe7d30ed12b..dac568e4a54f 100644
--- a/arch/arm64/kernel/pi/patch-scs.c
+++ b/arch/arm64/kernel/pi/patch-scs.c
@@ -192,6 +192,14 @@ static int scs_handle_fde_frame(const struct eh_frame *frame,
 			size -= 2;
 			break;
 
+		case DW_CFA_advance_loc4:
+			loc += *opcode++ * code_alignment_factor;
+			loc += (*opcode++ << 8) * code_alignment_factor;
+			loc += (*opcode++ << 16) * code_alignment_factor;
+			loc += (*opcode++ << 24) * code_alignment_factor;
+			size -= 4;
+			break;
+
 		case DW_CFA_def_cfa:
 		case DW_CFA_offset_extended:
 			size = skip_xleb128(&opcode, size);
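The four byte-wise additions decode DW_CFA_advance_loc4's little-endian 32-bit operand, scaling each byte by the code alignment factor; summed, that is equivalent to scaling the whole value once. An equivalent form, assuming an unaligned little-endian load helper were acceptable in the early pi/ environment (the byte-wise version above avoids that assumption):

	case DW_CFA_advance_loc4:
		loc += get_unaligned_le32(opcode) * code_alignment_factor;
		opcode += 4;
		size -= 4;
		break;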
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 489554931231..c0bf1f46cdc6 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -51,6 +51,7 @@
 #include <asm/fpsimd.h>
 #include <asm/gcs.h>
 #include <asm/mmu_context.h>
+#include <asm/mpam.h>
 #include <asm/mte.h>
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
@@ -699,6 +700,29 @@ void update_sctlr_el1(u64 sctlr)
 	isb();
 }
 
+static inline void debug_switch_state(void)
+{
+	if (system_uses_irq_prio_masking()) {
+		unsigned long daif_expected = 0;
+		unsigned long daif_actual = read_sysreg(daif);
+		unsigned long pmr_expected = GIC_PRIO_IRQOFF;
+		unsigned long pmr_actual = read_sysreg_s(SYS_ICC_PMR_EL1);
+
+		WARN_ONCE(daif_actual != daif_expected ||
+			  pmr_actual != pmr_expected,
+			  "Unexpected DAIF + PMR: 0x%lx + 0x%lx (expected 0x%lx + 0x%lx)\n",
+			  daif_actual, pmr_actual,
+			  daif_expected, pmr_expected);
+	} else {
+		unsigned long daif_expected = DAIF_PROCCTX_NOIRQ;
+		unsigned long daif_actual = read_sysreg(daif);
+
+		WARN_ONCE(daif_actual != daif_expected,
+			  "Unexpected DAIF value: 0x%lx (expected 0x%lx)\n",
+			  daif_actual, daif_expected);
+	}
+}
+
 /*
  * Thread switching.
  */
@@ -708,6 +732,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
 {
 	struct task_struct *last;
 
+	debug_switch_state();
+
 	fpsimd_thread_switch(next);
 	tls_thread_switch(next);
 	hw_breakpoint_thread_switch(next);
@@ -738,6 +764,12 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	if (prev->thread.sctlr_user != next->thread.sctlr_user)
 		update_sctlr_el1(next->thread.sctlr_user);
 
+	/*
+	 * MPAM thread switch happens after the DSB to ensure prev's accesses
+	 * use prev's MPAM settings.
+	 */
+	mpam_thread_switch(next);
+
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index 413f899e4ac6..6cb4209f5dab 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -64,7 +64,8 @@ SYM_CODE_START(arm64_relocate_new_kernel)
 	mov	x19, x13
 	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
 	add	x1, x19, #PAGE_SIZE
-	dcache_by_myline_op civac, sy, x19, x1, x15, x20
+	dcache_by_myline_op_nosync civac, x19, x1, x15, x20
+	dsb	sy
 	b	.Lnext
.Ltest_indirection:
 	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
index c64a06f58c0b..92160f2e57ff 100644
--- a/arch/arm64/kernel/rsi.c
+++ b/arch/arm64/kernel/rsi.c
@@ -12,6 +12,7 @@
 
 #include <asm/io.h>
 #include <asm/mem_encrypt.h>
+#include <asm/pgtable.h>
 #include <asm/rsi.h>
 
 static struct realm_config config;
@@ -144,9 +145,9 @@ void __init arm64_rsi_init(void)
 		return;
 	if (!rsi_version_matches())
 		return;
-	if (WARN_ON(rsi_get_realm_config(&config)))
+	if (WARN_ON(rsi_get_realm_config(lm_alias(&config))))
 		return;
-	prot_ns_shared = BIT(config.ipa_bits - 1);
+	prot_ns_shared = __phys_to_pte_val(BIT(config.ipa_bits - 1));
 
 	if (arm64_ioremap_prot_hook_register(realm_ioremap_hook))
 		return;
diff --git a/arch/arm64/kernel/static_call.c b/arch/arm64/kernel/static_call.c
new file mode 100644
index 000000000000..8b3a19e10871
--- /dev/null
+++ b/arch/arm64/kernel/static_call.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <asm/text-patching.h>
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+	u64 literal;
+	int ret;
+
+	if (!func)
+		func = __static_call_return0;
+
+	/* decode the instructions to discover the literal address */
+	literal = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
+		  aarch64_insn_adrp_get_offset(le32_to_cpup(tramp + 4)) +
+		  8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
+						    le32_to_cpup(tramp + 8));
+
+	ret = aarch64_insn_write_literal_u64((void *)literal, (u64)func);
+	WARN_ON_ONCE(ret);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
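arch_static_call_transform() recovers the address of the trampoline's 64-bit literal from the ADRP/LDR pair it contains, then patches that literal with the new target. On the caller side this is driven by the generic static-call API; a usage sketch (my_op, do_slow and do_fast are illustrative names, not from this diff):

	#include <linux/static_call.h>

	static int do_slow(int x) { return x + 1; }
	static int do_fast(int x) { return x * 2; }

	DEFINE_STATIC_CALL(my_op, do_slow);	/* hypothetical call site key */

	int compute(int x)
	{
		return static_call(my_op)(x);	/* branches via the trampoline */
	}

	void switch_impl(void)
	{
		/* ends up in arch_static_call_transform() to patch the literal */
		static_call_update(my_op, do_fast);
	}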
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 96bcfb907443..12a948f3a504 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -89,7 +89,7 @@ COMPAT_SYSCALL_DEFINE4(aarch32_truncate64, const char __user *, pathname,
 COMPAT_SYSCALL_DEFINE4(aarch32_ftruncate64, unsigned int, fd, u32, __pad,
 		       arg_u32p(length))
 {
-	return ksys_ftruncate(fd, arg_u64(length));
+	return ksys_ftruncate(fd, arg_u64(length), FTRUNCATE_LFS);
 }
 
 COMPAT_SYSCALL_DEFINE5(aarch32_readahead, int, fd, u32, __pad,
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index b9d4998c97ef..7e9860143add 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 			/*
 			 * The workaround requires an inner-shareable tlbi.
 			 * We pick the reserved-ASID to minimise the impact.
 			 */
-			__tlbi(aside1is, __TLBI_VADDR(0, 0));
+			__tlbi(aside1is, 0UL);
 			__tlbi_sync_s1ish();
 		}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index c062badd1a56..358ddfbf1401 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -52,17 +52,6 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
 	}
 
 	syscall_set_return_value(current, regs, 0, ret);
-
-	/*
-	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
-	 * bits. The actual entropy will be further reduced by the compiler
-	 * when applying stack alignment constraints: the AAPCS mandates a
-	 * 16-byte aligned SP at function boundaries, which will remove the
-	 * 4 low bits from any entropy chosen here.
-	 *
-	 * The resulting 6 bits of entropy is seen in SP[9:4].
-	 */
-	choose_random_kstack_offset(get_random_u16());
 }
 
 static inline bool has_syscall_work(unsigned long flags)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 9d0efed91414..bea3675fa668 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -63,7 +63,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 	       $(filter -Werror,$(KBUILD_CPPFLAGS)) \
 	       -Werror-implicit-function-declaration \
 	       -Wno-format-security \
-	       -std=gnu11 -fms-extensions
+	       $(CC_FLAGS_DIALECT)
 VDSO_CFLAGS += -O2
 # Some useful compiler-dependent flags from top-level Makefile
 VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
@@ -71,7 +71,6 @@ VDSO_CFLAGS += -fno-strict-overflow
 VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
 VDSO_CFLAGS += -Werror=date-time
 VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types)
-VDSO_CFLAGS += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag)
 
 # Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
 # unreliable.
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 2964aad0362e..e1ac876200a3 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,12 +13,23 @@
 	*(__kvm_ex_table)					\
 	__stop___kvm_ex_table = .;
 
+#ifdef CONFIG_NVHE_EL2_TRACING
+#define HYPERVISOR_EVENT_IDS					\
+	. = ALIGN(PAGE_SIZE);					\
+	__hyp_event_ids_start = .;				\
+	*(HYP_SECTION_NAME(.event_ids))				\
+	__hyp_event_ids_end = .;
+#else
+#define HYPERVISOR_EVENT_IDS
+#endif
+
 #define HYPERVISOR_RODATA_SECTIONS				\
 	HYP_SECTION_NAME(.rodata) : {				\
 		. = ALIGN(PAGE_SIZE);				\
 		__hyp_rodata_start = .;				\
 		*(HYP_SECTION_NAME(.data..ro_after_init))	\
 		*(HYP_SECTION_NAME(.rodata))			\
+		HYPERVISOR_EVENT_IDS				\
 		. = ALIGN(PAGE_SIZE);				\
 		__hyp_rodata_end = .;				\
 	}
@@ -191,6 +202,7 @@ SECTIONS
 			LOCK_TEXT
 			KPROBES_TEXT
 			HYPERVISOR_TEXT
+			STATIC_CALL_TEXT
 			*(.gnu.warning)
 		}
 
@@ -307,6 +319,13 @@ SECTIONS
 
 	HYPERVISOR_DATA_SECTION
 
+#ifdef CONFIG_NVHE_EL2_TRACING
+	.data.hyp_events : {
+		__hyp_events_start = .;
+		*(SORT(_hyp_events.*))
+		__hyp_events_end = .;
+	}
+#endif
 	/*
	 * Data written with the MMU off but read with the MMU on requires
	 * cache lines to be invalidated, discarding up to a Cache Writeback
