Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 19
-rw-r--r--  arch/arm64/include/asm/alternative-macros.h | 8
-rw-r--r--  arch/arm64/include/asm/alternative.h | 11
-rw-r--r--  arch/arm64/include/asm/apple_m1_pmu.h | 1
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 4
-rw-r--r--  arch/arm64/include/asm/asm-bug.h | 33
-rw-r--r--  arch/arm64/include/asm/asm-extable.h | 16
-rw-r--r--  arch/arm64/include/asm/asm-uaccess.h | 6
-rw-r--r--  arch/arm64/include/asm/assembler.h | 81
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 20
-rw-r--r--  arch/arm64/include/asm/barrier.h | 7
-rw-r--r--  arch/arm64/include/asm/bug.h | 2
-rw-r--r--  arch/arm64/include/asm/cache.h | 26
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/cfi.h | 7
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 12
-rw-r--r--  arch/arm64/include/asm/cpu.h | 1
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 14
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 73
-rw-r--r--  arch/arm64/include/asm/cputype.h | 72
-rw-r--r--  arch/arm64/include/asm/current.h | 4
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 2
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 44
-rw-r--r--  arch/arm64/include/asm/efi.h | 15
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 313
-rw-r--r--  arch/arm64/include/asm/elf.h | 4
-rw-r--r--  arch/arm64/include/asm/entry-common.h | 57
-rw-r--r--  arch/arm64/include/asm/esr.h | 85
-rw-r--r--  arch/arm64/include/asm/exception.h | 15
-rw-r--r--  arch/arm64/include/asm/extable.h | 4
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 10
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 67
-rw-r--r--  arch/arm64/include/asm/fpu.h | 16
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 58
-rw-r--r--  arch/arm64/include/asm/futex.h | 311
-rw-r--r--  arch/arm64/include/asm/gcs.h | 93
-rw-r--r--  arch/arm64/include/asm/gpr-num.h | 6
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 4
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 43
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 103
-rw-r--r--  arch/arm64/include/asm/hyperv-tlfs.h | 71
-rw-r--r--  arch/arm64/include/asm/hypervisor.h | 1
-rw-r--r--  arch/arm64/include/asm/image.h | 4
-rw-r--r--  arch/arm64/include/asm/insn.h | 40
-rw-r--r--  arch/arm64/include/asm/io.h | 30
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm64/include/asm/kasan.h | 2
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 10
-rw-r--r--  arch/arm64/include/asm/kexec.h | 4
-rw-r--r--  arch/arm64/include/asm/kfence.h | 3
-rw-r--r--  arch/arm64/include/asm/kgdb.h | 16
-rw-r--r--  arch/arm64/include/asm/kprobes.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 256
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 60
-rw-r--r--  arch/arm64/include/asm/kvm_define_hypevents.h | 16
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 288
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 654
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 19
-rw-r--r--  arch/arm64/include/asm/kvm_hypevents.h | 60
-rw-r--r--  arch/arm64/include/asm/kvm_hyptrace.h | 26
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 48
-rw-r--r--  arch/arm64/include/asm/kvm_mte.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_nested.h | 183
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h | 152
-rw-r--r--  arch/arm64/include/asm/kvm_pkvm.h | 87
-rw-r--r--  arch/arm64/include/asm/kvm_ptrauth.h | 6
-rw-r--r--  arch/arm64/include/asm/kvm_ras.h | 25
-rw-r--r--  arch/arm64/include/asm/linkage.h | 2
-rw-r--r--  arch/arm64/include/asm/lse.h | 9
-rw-r--r--  arch/arm64/include/asm/lsui.h | 27
-rw-r--r--  arch/arm64/include/asm/mem_encrypt.h | 13
-rw-r--r--  arch/arm64/include/asm/memory.h | 28
-rw-r--r--  arch/arm64/include/asm/mman.h | 13
-rw-r--r--  arch/arm64/include/asm/mmu.h | 28
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 37
-rw-r--r--  arch/arm64/include/asm/module.h | 1
-rw-r--r--  arch/arm64/include/asm/module.lds.h | 1
-rw-r--r--  arch/arm64/include/asm/mpam.h | 96
-rw-r--r--  arch/arm64/include/asm/mshyperv.h | 20
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h | 10
-rw-r--r--  arch/arm64/include/asm/mte.h | 26
-rw-r--r--  arch/arm64/include/asm/neon.h | 4
-rw-r--r--  arch/arm64/include/asm/page.h | 16
-rw-r--r--  arch/arm64/include/asm/paravirt.h | 14
-rw-r--r--  arch/arm64/include/asm/paravirt_api_clock.h | 1
-rw-r--r--  arch/arm64/include/asm/percpu.h | 15
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 18
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 193
-rw-r--r--  arch/arm64/include/asm/pgtable-prot.h | 69
-rw-r--r--  arch/arm64/include/asm/pgtable-types.h | 20
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 490
-rw-r--r--  arch/arm64/include/asm/pkeys.h | 3
-rw-r--r--  arch/arm64/include/asm/por.h | 11
-rw-r--r--  arch/arm64/include/asm/preempt.h | 2
-rw-r--r--  arch/arm64/include/asm/proc-fns.h | 4
-rw-r--r--  arch/arm64/include/asm/processor.h | 13
-rw-r--r--  arch/arm64/include/asm/ptdump.h | 30
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 17
-rw-r--r--  arch/arm64/include/asm/resctrl.h | 2
-rw-r--r--  arch/arm64/include/asm/rqspinlock.h | 93
-rw-r--r--  arch/arm64/include/asm/rsi.h | 4
-rw-r--r--  arch/arm64/include/asm/rsi_cmds.h | 2
-rw-r--r--  arch/arm64/include/asm/rsi_smc.h | 4
-rw-r--r--  arch/arm64/include/asm/runtime-const.h | 4
-rw-r--r--  arch/arm64/include/asm/rwonce.h | 32
-rw-r--r--  arch/arm64/include/asm/scs.h | 14
-rw-r--r--  arch/arm64/include/asm/sdei.h | 4
-rw-r--r--  arch/arm64/include/asm/seccomp.h | 1
-rw-r--r--  arch/arm64/include/asm/sections.h | 1
-rw-r--r--  arch/arm64/include/asm/setup.h | 4
-rw-r--r--  arch/arm64/include/asm/simd.h | 19
-rw-r--r--  arch/arm64/include/asm/smp.h | 28
-rw-r--r--  arch/arm64/include/asm/sparsemem.h | 5
-rw-r--r--  arch/arm64/include/asm/spectre.h | 9
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 6
-rw-r--r--  arch/arm64/include/asm/stacktrace/frame.h | 4
-rw-r--r--  arch/arm64/include/asm/stacktrace/nvhe.h | 2
-rw-r--r--  arch/arm64/include/asm/static_call.h | 31
-rw-r--r--  arch/arm64/include/asm/suspend.h | 4
-rw-r--r--  arch/arm64/include/asm/syscall.h | 41
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 278
-rw-r--r--  arch/arm64/include/asm/system_misc.h | 8
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 25
-rw-r--r--  arch/arm64/include/asm/tlb.h | 27
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 516
-rw-r--r--  arch/arm64/include/asm/topology.h | 3
-rw-r--r--  arch/arm64/include/asm/traps.h | 11
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 58
-rw-r--r--  arch/arm64/include/asm/uprobes.h | 11
-rw-r--r--  arch/arm64/include/asm/vdso.h | 6
-rw-r--r--  arch/arm64/include/asm/vdso/compat_barrier.h | 11
-rw-r--r--  arch/arm64/include/asm/vdso/compat_gettimeofday.h | 51
-rw-r--r--  arch/arm64/include/asm/vdso/getrandom.h | 16
-rw-r--r--  arch/arm64/include/asm/vdso/gettimeofday.h | 57
-rw-r--r--  arch/arm64/include/asm/vdso/processor.h | 4
-rw-r--r--  arch/arm64/include/asm/vdso/vsyscall.h | 34
-rw-r--r--  arch/arm64/include/asm/virt.h | 23
-rw-r--r--  arch/arm64/include/asm/vmalloc.h | 54
-rw-r--r--  arch/arm64/include/asm/vmap_stack.h | 4
-rw-r--r--  arch/arm64/include/asm/vncr_mapping.h | 12
-rw-r--r--  arch/arm64/include/asm/xen/events.h | 2
-rw-r--r--  arch/arm64/include/asm/xor.h | 77
-rw-r--r--  arch/arm64/include/uapi/asm/bitsperlong.h | 5
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 19
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 29
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 4
148 files changed, 4232 insertions, 2409 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 4e350df9a02d..d2ff8f6c3231 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -8,6 +8,7 @@ syscall-y += unistd_32.h
syscall-y += unistd_compat_32.h
generic-y += early_ioremap.h
+generic-y += fprobe.h
generic-y += mcs_spinlock.h
generic-y += mmzone.h
generic-y += qrwlock.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index a407f9cd549e..8a54ca6ba602 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -114,22 +114,7 @@ static inline bool acpi_has_cpu_in_madt(void)
}
struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu);
-static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
-{
- return acpi_cpu_get_madt_gicc(cpu)->uid;
-}
-
-static inline int get_cpu_for_acpi_id(u32 uid)
-{
- int cpu;
-
- for (cpu = 0; cpu < nr_cpu_ids; cpu++)
- if (acpi_cpu_get_madt_gicc(cpu) &&
- uid == get_acpi_id_for_cpu(cpu))
- return cpu;
-
- return -EINVAL;
-}
+int get_cpu_for_acpi_id(u32 uid);
static inline void arch_fix_phys_package_id(int num, u32 slot) { }
void __init acpi_init_cpus(void);
@@ -150,7 +135,7 @@ acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
{}
#endif
-static inline const char *acpi_get_enable_method(int cpu)
+static __always_inline const char *acpi_get_enable_method(int cpu)
{
if (acpi_psci_present())
return "psci";
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index c8c77f9e36d6..862416624852 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -19,7 +19,7 @@
#error "cpucaps have overflown ARM64_CB_BIT"
#endif
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/stringify.h>
@@ -207,7 +207,7 @@ alternative_endif
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* Usage: asm(ALTERNATIVE(oldinstr, newinstr, cpucap));
@@ -219,7 +219,7 @@ alternative_endif
#define ALTERNATIVE(oldinstr, newinstr, ...) \
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -263,6 +263,6 @@ l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ALTERNATIVE_MACROS_H */
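
The guard rename works because __ASSEMBLER__ is predefined by GCC and Clang themselves whenever assembly is being preprocessed, whereas __ASSEMBLY__ had to be supplied on the kbuild command line. A minimal illustration of the pattern (not from the patch):

#ifndef __ASSEMBLER__	/* predefined by the compiler when preprocessing .S files */
#include <linux/types.h>

static inline bool only_built_for_c(void)
{
	return true;
}
#endif /* __ASSEMBLER__ */
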
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 00d97b8a757f..621aa8550174 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -4,7 +4,7 @@
#include <asm/alternative-macros.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/init.h>
#include <linux/types.h>
@@ -26,13 +26,16 @@ void __init apply_alternatives_all(void);
bool alternative_is_applied(u16 cpucap);
#ifdef CONFIG_MODULES
-void apply_alternatives_module(void *start, size_t length);
+int apply_alternatives_module(void *start, size_t length);
#else
-static inline void apply_alternatives_module(void *start, size_t length) { }
+static inline int apply_alternatives_module(void *start, size_t length)
+{
+ return 0;
+}
#endif
void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
__le32 *updptr, int nr_inst);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ALTERNATIVE_H */
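
With apply_alternatives_module() now able to report failure, a caller in the module-load path is expected to propagate the error. Illustrative caller only; the function name and error handling are assumptions, not taken from the patch:

static int module_apply_alternatives(void *text, size_t len)
{
	int ret = apply_alternatives_module(text, len);

	if (ret)
		pr_err("module alternatives patching failed: %d\n", ret);

	return ret;
}
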
diff --git a/arch/arm64/include/asm/apple_m1_pmu.h b/arch/arm64/include/asm/apple_m1_pmu.h
index 99483b19b99f..02e05d05851f 100644
--- a/arch/arm64/include/asm/apple_m1_pmu.h
+++ b/arch/arm64/include/asm/apple_m1_pmu.h
@@ -37,6 +37,7 @@
#define PMCR0_PMI_ENABLE_8_9 GENMASK(45, 44)
#define SYS_IMP_APL_PMCR1_EL1 sys_reg(3, 1, 15, 1, 0)
+#define SYS_IMP_APL_PMCR1_EL12 sys_reg(3, 1, 15, 7, 2)
#define PMCR1_COUNT_A64_EL0_0_7 GENMASK(15, 8)
#define PMCR1_COUNT_A64_EL1_0_7 GENMASK(23, 16)
#define PMCR1_COUNT_A64_EL0_8_9 GENMASK(41, 40)
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 9e96f024b2f1..d20b03931a8d 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -9,7 +9,7 @@
#include <asm/sysreg.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/irqchip/arm-gic-common.h>
#include <linux/stringify.h>
@@ -188,5 +188,5 @@ static inline bool gic_has_relaxed_pmr_sync(void)
return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index 6e73809f6492..a5f13801b784 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -21,16 +21,21 @@
#endif
#ifdef CONFIG_GENERIC_BUG
-
-#define __BUG_ENTRY(flags) \
+#define __BUG_ENTRY_START \
.pushsection __bug_table,"aw"; \
.align 2; \
14470: .long 14471f - .; \
-_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
- .short flags; \
+
+#define __BUG_ENTRY_END \
.align 2; \
.popsection; \
14471:
+
+#define __BUG_ENTRY(flags) \
+ __BUG_ENTRY_START \
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ .short flags; \
+ __BUG_ENTRY_END
#else
#define __BUG_ENTRY(flags)
#endif
@@ -41,4 +46,24 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
#define ASM_BUG() ASM_BUG_FLAGS(0)
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_LOCATION_STRING(file, line) \
+ ".long " file "- .;" \
+ ".short " line ";"
+#else
+#define __BUG_LOCATION_STRING(file, line)
+#endif
+
+#define __BUG_ENTRY_STRING(file, line, flags) \
+ __stringify(__BUG_ENTRY_START) \
+ __BUG_LOCATION_STRING(file, line) \
+ ".short " flags ";" \
+ __stringify(__BUG_ENTRY_END)
+
+#define ARCH_WARN_ASM(file, line, flags, size) \
+ __BUG_ENTRY_STRING(file, line, flags) \
+ __stringify(brk BUG_BRK_IMM)
+
+#define ARCH_WARN_REACHABLE
+
#endif /* __ASM_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h
index b8a5861dc7b7..d67e2fdd1aee 100644
--- a/arch/arm64/include/asm/asm-extable.h
+++ b/arch/arm64/include/asm/asm-extable.h
@@ -9,7 +9,8 @@
#define EX_TYPE_BPF 1
#define EX_TYPE_UACCESS_ERR_ZERO 2
#define EX_TYPE_KACCESS_ERR_ZERO 3
-#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+#define EX_TYPE_UACCESS_CPY 4
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 5
/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
#define EX_DATA_REG_ERR_SHIFT 0
@@ -23,7 +24,10 @@
#define EX_DATA_REG_ADDR_SHIFT 5
#define EX_DATA_REG_ADDR GENMASK(9, 5)
-#ifdef __ASSEMBLY__
+/* Data fields for EX_TYPE_UACCESS_CPY */
+#define EX_DATA_UACCESS_WRITE BIT(0)
+
+#ifdef __ASSEMBLER__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
@@ -69,7 +73,11 @@
.endif
.endm
-#else /* __ASSEMBLY__ */
+ .macro _asm_extable_uaccess_cpy, insn, fixup, uaccess_is_write
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_UACCESS_CPY, \uaccess_is_write)
+ .endm
+
+#else /* __ASSEMBLER__ */
#include <linux/stringify.h>
@@ -124,6 +132,6 @@
EX_DATA_REG(ADDR, addr) \
")")
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ASM_EXTABLE_H */
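
The new EX_TYPE_UACCESS_CPY entries carry a single data bit, EX_DATA_UACCESS_WRITE, so the fault-time fixup can tell whether the faulting copy instruction was storing to or loading from user memory. Below is a sketch of what such a fixup handler could look like; the handler name, message and fixup computation follow the usual arm64 extable style but are assumptions here, not part of the patch:

static bool ex_handler_uaccess_cpy(const struct exception_table_entry *ex,
				   struct pt_regs *regs)
{
	/* Set when the faulting instruction was writing to user memory. */
	bool to_user = ex->data & EX_DATA_UACCESS_WRITE;

	pr_debug("uaccess copy fault while %s user memory\n",
		 to_user ? "writing" : "reading");

	/* Resume at the fixup label, stored as a self-relative offset. */
	regs->pc = (unsigned long)&ex->fixup + ex->fixup;
	return true;
}
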
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index 5b6efe8abeeb..12aa6a283249 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -15,7 +15,7 @@
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
- bic \tmp1, \tmp1, #TTBR_ASID_MASK
+ bic \tmp1, \tmp1, #TTBRx_EL1_ASID_MASK
sub \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET // reserved_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
add \tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
@@ -61,6 +61,10 @@ alternative_else_nop_endif
9999: x; \
_asm_extable_uaccess 9999b, l
+#define USER_CPY(l, uaccess_is_write, x...) \
+9999: x; \
+ _asm_extable_uaccess_cpy 9999b, l, uaccess_is_write
+
/*
* Generate the assembly for LDTR/STTR with exception table entries.
* This is complicated as there is no post-increment or pair versions of the
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3d8d534a7a77..effae53e9739 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -5,7 +5,7 @@
* Copyright (C) 1996-2000 Russell King
* Copyright (C) 2012 ARM Ltd.
*/
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#error "Only include this from assembly code"
#endif
@@ -41,6 +41,11 @@
/*
* Save/restore interrupts.
*/
+ .macro save_and_disable_daif, flags
+ mrs \flags, daif
+ msr daifset, #0xf
+ .endm
+
.macro save_and_disable_irq, flags
mrs \flags, daif
msr daifset, #3
@@ -53,7 +58,7 @@
.macro disable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- bic \tmp, \tmp, #DBG_MDSCR_SS
+ bic \tmp, \tmp, #MDSCR_EL1_SS
msr mdscr_el1, \tmp
isb // Take effect before a subsequent clear of DAIF.D
9990:
@@ -63,7 +68,7 @@
.macro enable_step_tsk, flgs, tmp
tbz \flgs, #TIF_SINGLESTEP, 9990f
mrs \tmp, mdscr_el1
- orr \tmp, \tmp, #DBG_MDSCR_SS
+ orr \tmp, \tmp, #MDSCR_EL1_SS
msr mdscr_el1, \tmp
9990:
.endm
@@ -320,14 +325,14 @@ alternative_cb_end
* tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
*/
.macro tcr_set_t0sz, valreg, t0sz
- bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ bfi \valreg, \t0sz, #TCR_EL1_T0SZ_SHIFT, #TCR_EL1_T0SZ_WIDTH
.endm
/*
* tcr_set_t1sz - update TCR.T1SZ
*/
.macro tcr_set_t1sz, valreg, t1sz
- bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
+ bfi \valreg, \t1sz, #TCR_EL1_T1SZ_SHIFT, #TCR_EL1_T1SZ_WIDTH
.endm
/*
@@ -343,6 +348,11 @@ alternative_cb_end
// Narrow PARange to fit the PS field in TCR_ELx
ubfx \tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
+#ifdef CONFIG_ARM64_LPA2
+alternative_if_not ARM64_HAS_VA52
+ mov \tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
+alternative_else_nop_endif
+#endif
cmp \tmp0, \tmp1
csel \tmp0, \tmp1, \tmp0, hi
bfi \tcr, \tmp0, \pos, #3
@@ -361,16 +371,18 @@ alternative_endif
* [start, end) with dcache line size explicitly provided.
*
* op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
* start: starting virtual address of the region
* end: end virtual address of the region
* linesz: dcache line size
* fixup: optional label to branch to on user fault
* Corrupts: start, end, tmp
*/
- .macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
+ .macro dcache_by_myline_op_nosync op, start, end, linesz, tmp, fixup
sub \tmp, \linesz, #1
bic \start, \start, \tmp
+alternative_if ARM64_WORKAROUND_4311569
+ mov \tmp, \start
+alternative_else_nop_endif
.Ldcache_op\@:
.ifc \op, cvau
__dcache_op_workaround_clean_cache \op, \start
@@ -392,25 +404,46 @@ alternative_endif
add \start, \start, \linesz
cmp \start, \end
b.lo .Ldcache_op\@
- dsb \domain
+alternative_if ARM64_WORKAROUND_4311569
+ .ifnc \op, cvau
+ mov \start, \tmp
+ mov \tmp, xzr
+ cbnz \start, .Ldcache_op\@
+ .endif
+alternative_else_nop_endif
_cond_uaccess_extable .Ldcache_op\@, \fixup
.endm
/*
* Macro to perform a data cache maintenance for the interval
- * [start, end)
+ * [start, end) without waiting for completion
*
* op: operation passed to dc instruction
- * domain: domain used in dsb instruciton
* start: starting virtual address of the region
* end: end virtual address of the region
* fixup: optional label to branch to on user fault
* Corrupts: start, end, tmp1, tmp2
*/
- .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
+ .macro dcache_by_line_op_nosync op, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
- dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
+ dcache_by_myline_op_nosync \op, \start, \end, \tmp1, \tmp2, \fixup
+ .endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [start, end) and wait for completion
+ *
+ * op: operation passed to dc instruction
+ * domain: domain used in dsb instruction
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
+ */
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
+ dcache_by_line_op_nosync \op, \start, \end, \tmp1, \tmp2, \fixup
+ dsb \domain
.endm
/*
@@ -579,7 +612,7 @@ alternative_endif
.macro offset_ttbr1, ttbr, tmp
#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
mrs \tmp, tcr_el1
- and \tmp, \tmp, #TCR_T1SZ_MASK
+ and \tmp, \tmp, #TCR_EL1_T1SZ_MASK
cmp \tmp, #TCR_T1SZ(VA_BITS_MIN)
orr \tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
csel \ttbr, \tmp, \ttbr, eq
@@ -728,28 +761,6 @@ alternative_endif
set_sctlr sctlr_el2, \reg
.endm
- /*
- * Check whether asm code should yield as soon as it is able. This is
- * the case if we are currently running in task context, and the
- * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
- * is stored negated in the top word of the thread_info::preempt_count
- * field)
- */
- .macro cond_yield, lbl:req, tmp:req, tmp2
-#ifdef CONFIG_PREEMPT_VOLUNTARY
- get_current_task \tmp
- ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
- /*
- * If we are serving a softirq, there is no point in yielding: the
- * softirq will not be preempted no matter what we do, so we should
- * run to completion as quickly as we can. The preempt_count field will
- * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
- * catch this case too.
- */
- cbz \tmp, \lbl
-#endif
- .endm
-
/*
* Branch Target Identifier (BTI)
*/
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e55..afad1849c4cf 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -103,17 +103,17 @@ static __always_inline void __lse_atomic_and(int i, atomic_t *v)
return __lse_atomic_andnot(~i, v);
}
-#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
+#define ATOMIC_FETCH_OP_AND(name) \
static __always_inline int \
__lse_atomic_fetch_and##name(int i, atomic_t *v) \
{ \
return __lse_atomic_fetch_andnot##name(~i, v); \
}
-ATOMIC_FETCH_OP_AND(_relaxed, )
-ATOMIC_FETCH_OP_AND(_acquire, a, "memory")
-ATOMIC_FETCH_OP_AND(_release, l, "memory")
-ATOMIC_FETCH_OP_AND( , al, "memory")
+ATOMIC_FETCH_OP_AND(_relaxed)
+ATOMIC_FETCH_OP_AND(_acquire)
+ATOMIC_FETCH_OP_AND(_release)
+ATOMIC_FETCH_OP_AND( )
#undef ATOMIC_FETCH_OP_AND
@@ -210,17 +210,17 @@ static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
return __lse_atomic64_andnot(~i, v);
}
-#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
+#define ATOMIC64_FETCH_OP_AND(name) \
static __always_inline long \
__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
{ \
return __lse_atomic64_fetch_andnot##name(~i, v); \
}
-ATOMIC64_FETCH_OP_AND(_relaxed, )
-ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
-ATOMIC64_FETCH_OP_AND(_release, l, "memory")
-ATOMIC64_FETCH_OP_AND( , al, "memory")
+ATOMIC64_FETCH_OP_AND(_relaxed)
+ATOMIC64_FETCH_OP_AND(_acquire)
+ATOMIC64_FETCH_OP_AND(_release)
+ATOMIC64_FETCH_OP_AND( )
#undef ATOMIC64_FETCH_OP_AND
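
The dropped barrier/clobber arguments were never used: each fetch_and variant only negates its operand and forwards to the matching fetch_andnot helper, which already encodes the required ordering. For example, ATOMIC_FETCH_OP_AND(_acquire) above effectively expands to:

static __always_inline int
__lse_atomic_fetch_and_acquire(int i, atomic_t *v)
{
	/* Acquire ordering is supplied by the andnot helper itself. */
	return __lse_atomic_fetch_andnot_acquire(~i, v);
}
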
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 1ca947d5c939..9495c4441a46 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -7,7 +7,7 @@
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kasan-checks.h>
@@ -44,6 +44,9 @@
SB_BARRIER_INSN"nop\n", \
ARM64_HAS_SB))
+#define gsb_ack() asm volatile(GSB_ACK_BARRIER_INSN : : : "memory")
+#define gsb_sys() asm volatile(GSB_SYS_BARRIER_INSN : : : "memory")
+
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
@@ -218,6 +221,6 @@ do { \
#include <asm-generic/barrier.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_BARRIER_H */
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
index 28be048db3f6..bceeaec21fb9 100644
--- a/arch/arm64/include/asm/bug.h
+++ b/arch/arm64/include/asm/bug.h
@@ -19,7 +19,7 @@
unreachable(); \
} while (0)
-#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+#define __WARN_FLAGS(cond_str, flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
#define HAVE_ARCH_BUG
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 06a4670bdb0b..10a7ffadee3d 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -35,7 +35,7 @@
#define ARCH_DMA_MINALIGN (128)
#define ARCH_KMALLOC_MINALIGN (8)
-#ifndef __ASSEMBLY__
+#if !defined(__ASSEMBLER__) && !defined(BUILD_VDSO)
#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
@@ -87,6 +87,28 @@ int cache_line_size(void);
#define dma_get_cache_alignment cache_line_size
+static inline void arch_sync_dma_flush(void)
+{
+ dsb(sy);
+}
+
+/* Compress a u64 MPIDR value into 32 bits. */
+static inline u64 arch_compact_of_hwid(u64 id)
+{
+ u64 aff3 = MPIDR_AFFINITY_LEVEL(id, 3);
+
+ /*
+ * These bits are expected to be RES0. If not, return a value with
+ * the upper 32 bits set to force the caller to give up on 32 bit
+ * cache ids.
+ */
+ if (FIELD_GET(GENMASK_ULL(63, 40), id))
+ return id;
+
+ return (aff3 << 24) | FIELD_GET(GENMASK_ULL(23, 0), id);
+}
+#define arch_compact_of_hwid arch_compact_of_hwid
+
/*
* Read the effective value of CTR_EL0.
*
@@ -118,6 +140,6 @@ static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
return ctr;
}
-#endif /* __ASSEMBLY__ */
+#endif /* !defined(__ASSEMBLER__) && !defined(BUILD_VDSO) */
#endif
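
Worked example for arch_compact_of_hwid(), using an illustrative MPIDR value that is not taken from the patch:

/*
 * MPIDR_EL1 = 0x0000008100000102:
 *   - bits 63:40 are zero, so the id is eligible for compaction
 *   - Aff3 (bits 39:32) = 0x81 is shifted down into bits 31:24
 *   - Aff2..Aff0 (bits 23:0) = 0x000102 stay in place
 * arch_compact_of_hwid() returns 0x81000102, which fits a 32-bit cache id.
 * Any id with bits 63:40 set is returned unchanged, making the caller fall
 * back to 64-bit cache ids.
 */
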
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 28ab96e808ef..382b4ac3734d 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -74,6 +74,8 @@ extern void icache_inval_pou(unsigned long start, unsigned long end);
extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
extern void dcache_inval_poc(unsigned long start, unsigned long end);
extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc_nosync(unsigned long start, unsigned long end);
+extern void dcache_clean_poc_nosync(unsigned long start, unsigned long end);
extern void dcache_clean_pop(unsigned long start, unsigned long end);
extern void dcache_clean_pou(unsigned long start, unsigned long end);
extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
index 000000000000..ab90f0351b7a
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#define __bpfcall
+
+#endif /* _ASM_ARM64_CFI_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d7a540736741..6cf3cd6873f5 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -91,8 +91,9 @@ __XCHG_GEN(_mb)
#define __xchg_wrapper(sfx, ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret = (__force __typeof__(*(ptr))) \
+ __arch_xchg##sfx((__force unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
__ret; \
})
@@ -175,9 +176,10 @@ __CMPXCHG_GEN(_mb)
#define __cmpxchg_wrapper(sfx, ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
- __ret = (__typeof__(*(ptr))) \
- __cmpxchg##sfx((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
+ __ret = (__force __typeof__(*(ptr))) \
+ __cmpxchg##sfx((ptr), (__force unsigned long)(o), \
+ (__force unsigned long)(n), \
+ sizeof(*(ptr))); \
__ret; \
})
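
The added __force casts let the wrappers operate on sparse __bitwise types without type warnings. An assumed example caller, for illustration only:

static inline __le32 publish_le32(__le32 *p, u32 val)
{
	/* Without the __force casts in __xchg_wrapper(), sparse would warn
	 * about converting __le32 to and from unsigned long here. */
	return xchg(p, cpu_to_le32(val));
}
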
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 81e4157f92b7..71493b760b83 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -44,6 +44,7 @@ struct cpuinfo_arm64 {
u64 reg_dczid;
u64 reg_midr;
u64 reg_revidr;
+ u64 reg_aidr;
u64 reg_gmid;
u64 reg_smidr;
u64 reg_mpamidr;
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 201a46efd918..6e3da333442e 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -5,7 +5,7 @@
#include <asm/cpucap-defs.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
/*
* Check whether a cpucap is possible at compiletime.
@@ -19,8 +19,6 @@ cpucap_is_possible(const unsigned int cap)
"cap must be < ARM64_NCAPS");
switch (cap) {
- case ARM64_HAS_PAN:
- return IS_ENABLED(CONFIG_ARM64_PAN);
case ARM64_HAS_EPAN:
return IS_ENABLED(CONFIG_ARM64_EPAN);
case ARM64_SVE:
@@ -44,6 +42,10 @@ cpucap_is_possible(const unsigned int cap)
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
case ARM64_HAS_S1POE:
return IS_ENABLED(CONFIG_ARM64_POE);
+ case ARM64_HAS_GCS:
+ return IS_ENABLED(CONFIG_ARM64_GCS);
+ case ARM64_HAFT:
+ return IS_ENABLED(CONFIG_ARM64_HAFT);
case ARM64_UNMAP_KERNEL_AT_EL0:
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
case ARM64_WORKAROUND_843419:
@@ -67,10 +69,14 @@ cpucap_is_possible(const unsigned int cap)
* KVM MPAM support doesn't rely on the host kernel supporting MPAM.
*/
return true;
+ case ARM64_HAS_PMUV3:
+ return IS_ENABLED(CONFIG_HW_PERF_EVENTS);
+ case ARM64_HAS_LSUI:
+ return IS_ENABLED(CONFIG_ARM64_LSUI);
}
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b64e49bd9d10..4de51f8d92cb 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -19,7 +19,7 @@
#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4
#define ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF 8
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bug.h>
#include <linux/jump_label.h>
@@ -199,7 +199,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
* registers (e.g, SCTLR, TCR etc.) or patching the kernel via
* alternatives. The kernel patching is batched and performed at later
* point. The actions are always initiated only after the capability
- * is finalised. This is usally denoted by "enabling" the capability.
+ * is finalised. This is usually denoted by "enabling" the capability.
* The actions are initiated as follows :
* a) Action is triggered on all online CPUs, after the capability is
* finalised, invoked within the stop_machine() context from
@@ -251,7 +251,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
/*
- * The capabilitiy is detected on the Boot CPU and is used by kernel
+ * The capability is detected on the Boot CPU and is used by kernel
* during early boot. i.e, the capability should be "detected" and
* "enabled" as early as possibly on all booting CPUs.
*/
@@ -275,6 +275,14 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
/* Panic when a conflict is detected */
#define ARM64_CPUCAP_PANIC_ON_CONFLICT ((u16)BIT(6))
+/*
+ * When paired with SCOPE_LOCAL_CPU, all early CPUs must satisfy the
+ * condition. This is different from SCOPE_SYSTEM where the check is performed
+ * only once at the end of the SMP boot on the sanitised ID registers.
+ * SCOPE_SYSTEM is not suitable for cases where the capability depends on
+ * properties local to a CPU like MIDR_EL1.
+ */
+#define ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS ((u16)BIT(7))
/*
* CPU errata workarounds that need to be enabled at boot time if one or
@@ -304,6 +312,16 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
(ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+/*
+ * CPU feature detected at boot time and present on all early CPUs. Late CPUs
+ * are permitted to have the feature even if it hasn't been enabled, although
+ * the feature will not be used by Linux in this case. If all early CPUs have
+ * the feature, then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_EARLY_LOCAL_CPU_FEATURE \
+ (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
+ ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU | \
+ ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS)
/*
* CPU feature detected at boot time, on one or more CPUs. A late CPU
@@ -391,6 +409,11 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}
+static inline bool cpucap_match_all_early_cpus(const struct arm64_cpu_capabilities *cap)
+{
+ return cap->type & ARM64_CPUCAP_MATCH_ALL_EARLY_CPUS;
+}
+
/*
* Generic helper for handling capabilities with multiple (match,enable) pairs
* of call backs, sharing the same capability bit.
@@ -525,29 +548,6 @@ cpuid_feature_extract_unsigned_field(u64 features, int field)
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}
-/*
- * Fields that identify the version of the Performance Monitors Extension do
- * not follow the standard ID scheme. See ARM DDI 0487E.a page D13-2825,
- * "Alternative ID scheme used for the Performance Monitors Extension version".
- */
-static inline u64 __attribute_const__
-cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
-{
- u64 val = cpuid_feature_extract_unsigned_field(features, field);
- u64 mask = GENMASK_ULL(field + 3, field);
-
- /* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
- if (val == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
- val = 0;
-
- if (val > cap) {
- features &= ~mask;
- features |= (cap << field) & mask;
- }
-
- return features;
-}
-
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -671,6 +671,7 @@ static inline bool supports_clearbhb(int scope)
}
const struct cpumask *system_32bit_el0_cpumask(void);
+const struct cpumask *fallback_32bit_el0_cpumask(void);
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
static inline bool system_supports_32bit_el0(void)
@@ -847,14 +848,12 @@ static inline bool system_supports_poe(void)
static inline bool system_supports_gcs(void)
{
- return IS_ENABLED(CONFIG_ARM64_GCS) &&
- alternative_has_cap_unlikely(ARM64_HAS_GCS);
+ return alternative_has_cap_unlikely(ARM64_HAS_GCS);
}
static inline bool system_supports_haft(void)
{
- return IS_ENABLED(CONFIG_ARM64_HAFT) &&
- cpus_have_final_cap(ARM64_HAFT);
+ return cpus_have_final_cap(ARM64_HAFT);
}
static __always_inline bool system_supports_mpam(void)
@@ -867,6 +866,18 @@ static __always_inline bool system_supports_mpam_hcr(void)
return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
}
+static inline bool system_supports_pmuv3(void)
+{
+ return cpus_have_final_cap(ARM64_HAS_PMUV3);
+}
+
+bool cpu_supports_bbml2_noabort(void);
+
+static inline bool system_supports_bbml2_noabort(void)
+{
+ return alternative_has_cap_unlikely(ARM64_HAS_BBML2_NOABORT);
+}
+
int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
@@ -1067,6 +1078,6 @@ static inline bool cpu_has_lpa2(void)
#endif
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
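
Illustrative use of one of the new helpers; the calling function is an assumption, not part of the patch:

static int __init example_pmu_init(void)
{
	/* True only once the ARM64_HAS_PMUV3 capability has been finalised
	 * on all early CPUs. */
	if (!system_supports_pmuv3())
		return -ENODEV;

	/* ... register PMUv3-backed perf events ... */
	return 0;
}
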
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 488f8e751349..08860d482e60 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -75,6 +75,7 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
#define ARM_CPU_PART_NEOVERSE_V1 0xD40
#define ARM_CPU_PART_CORTEX_A78 0xD41
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
@@ -91,9 +92,11 @@
#define ARM_CPU_PART_NEOVERSE_V2 0xD4F
#define ARM_CPU_PART_CORTEX_A720 0xD81
#define ARM_CPU_PART_CORTEX_X4 0xD82
+#define ARM_CPU_PART_NEOVERSE_V3AE 0xD83
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
+#define ARM_CPU_PART_CORTEX_A720AE 0xD89
#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_XGENE 0x000
@@ -119,16 +122,21 @@
#define QCOM_CPU_PART_KRYO 0x200
#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
+#define QCOM_CPU_PART_ORYON_X1 0x001
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
+#define NVIDIA_CPU_PART_OLYMPUS 0x010
#define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01
+#define HISI_CPU_PART_HIP09 0xD02
+#define HISI_CPU_PART_HIP12 0xD06
#define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -158,6 +166,7 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
@@ -174,9 +183,11 @@
#define MIDR_NEOVERSE_V2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V2)
#define MIDR_CORTEX_A720 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720)
#define MIDR_CORTEX_X4 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X4)
+#define MIDR_NEOVERSE_V3AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3AE)
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+#define MIDR_CORTEX_A720AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720AE)
#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
@@ -195,13 +206,28 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+#define MIDR_QCOM_ORYON_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_ORYON_X1)
+
+/*
+ * NOTES:
+ * - Qualcomm Kryo 5XX Prime / Gold ID themselves as MIDR_CORTEX_A77
+ * - Qualcomm Kryo 5XX Silver IDs itself as MIDR_QCOM_KRYO_4XX_SILVER
+ * - Qualcomm Kryo 6XX Prime IDs itself as MIDR_CORTEX_X1
+ * - Qualcomm Kryo 6XX Gold IDs itself as ARM_CPU_PART_CORTEX_A78
+ * - Qualcomm Kryo 6XX Silver IDs itself as MIDR_CORTEX_A55
+ */
+
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_NVIDIA_OLYMPUS MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_OLYMPUS)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
+#define MIDR_HISI_HIP12 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP12)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
@@ -221,15 +247,25 @@
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0))
-#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
+#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_EL1_NFD1 | TCR_EL1_NFD0)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/sysreg.h>
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(MIDR_EL1);
+}
+
+/*
* Represent a range of MIDR values for a given CPU model and a
* range of variant/revision values.
*
@@ -264,30 +300,14 @@ static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
return _model == model && rv >= rv_min && rv <= rv_max;
}
-static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
-{
- return midr_is_cpu_model_range(midr, range->model,
- range->rv_min, range->rv_max);
-}
-
-static inline bool
-is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
-{
- while (ranges->model)
- if (is_midr_in_range(midr, ranges++))
- return true;
- return false;
-}
+struct target_impl_cpu {
+ u64 midr;
+ u64 revidr;
+ u64 aidr;
+};
-/*
- * The CPU ID never changes at run time, so we might as well tell the
- * compiler that it's constant. Use this function to read the CPU ID
- * rather than directly reading processor_id or read_cpuid() directly.
- */
-static inline u32 __attribute_const__ read_cpuid_id(void)
-{
- return read_cpuid(MIDR_EL1);
-}
+bool cpu_errata_set_target_impl(u64 num, void *impl_cpus);
+bool is_midr_in_range_list(struct midr_range const *ranges);
static inline u64 __attribute_const__ read_cpuid_mpidr(void)
{
@@ -308,6 +328,6 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
return read_cpuid(CTR_EL0);
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
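
With the reworked interface the caller no longer reads and passes MIDR_EL1 itself: is_midr_in_range_list() checks against the target implementation CPUs registered through cpu_errata_set_target_impl(). An illustrative erratum check follows; the list contents, the MIDR_ALL_VERSIONS helper and the function names are assumptions:

static const struct midr_range example_erratum_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
	{ /* sentinel */ },
};

static bool has_example_erratum(void)
{
	return is_midr_in_range_list(example_erratum_cpus);
}
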
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
index 54ceae0874c7..c92912eaf186 100644
--- a/arch/arm64/include/asm/current.h
+++ b/arch/arm64/include/asm/current.h
@@ -4,7 +4,7 @@
#include <linux/compiler.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
@@ -23,7 +23,7 @@ static __always_inline struct task_struct *get_current(void)
#define current get_current()
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_CURRENT_H */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index fbb5c99eb2f9..5fca48009043 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -128,7 +128,7 @@ static inline void local_daif_inherit(struct pt_regs *regs)
{
unsigned long flags = regs->pstate & DAIF_MASK;
- if (interrupts_enabled(regs))
+ if (!regs_irqs_disabled(regs))
trace_hardirqs_on();
if (system_uses_irq_prio_masking())
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 8f6ba31b8658..8d5f92418838 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -13,14 +13,8 @@
#include <asm/ptrace.h>
/* Low-level stepping controls. */
-#define DBG_MDSCR_SS (1 << 0)
#define DBG_SPSR_SS (1 << 21)
-/* MDSCR_EL1 enabling bits */
-#define DBG_MDSCR_KDE (1 << 13)
-#define DBG_MDSCR_MDE (1 << 15)
-#define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
-
#define DBG_ESR_EVT(x) (((x) >> 27) & 0x7)
/* AArch64 */
@@ -54,7 +48,7 @@
#define AARCH32_BREAK_THUMB2_LO 0xf7f0
#define AARCH32_BREAK_THUMB2_HI 0xa000
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
@@ -62,30 +56,6 @@ struct task_struct;
#define DBG_HOOK_HANDLED 0
#define DBG_HOOK_ERROR 1
-struct step_hook {
- struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned long esr);
-};
-
-void register_user_step_hook(struct step_hook *hook);
-void unregister_user_step_hook(struct step_hook *hook);
-
-void register_kernel_step_hook(struct step_hook *hook);
-void unregister_kernel_step_hook(struct step_hook *hook);
-
-struct break_hook {
- struct list_head node;
- int (*fn)(struct pt_regs *regs, unsigned long esr);
- u16 imm;
- u16 mask; /* These bits are ignored when comparing with imm */
-};
-
-void register_user_break_hook(struct break_hook *hook);
-void unregister_user_break_hook(struct break_hook *hook);
-
-void register_kernel_break_hook(struct break_hook *hook);
-void unregister_kernel_break_hook(struct break_hook *hook);
-
u8 debug_monitors_arch(void);
enum dbg_active_el {
@@ -108,17 +78,15 @@ void kernel_rewind_single_step(struct pt_regs *regs);
void kernel_fastforward_single_step(struct pt_regs *regs);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
-int reinstall_suspended_bps(struct pt_regs *regs);
+bool try_step_suspended_breakpoints(struct pt_regs *regs);
#else
-static inline int reinstall_suspended_bps(struct pt_regs *regs)
+static inline bool try_step_suspended_breakpoints(struct pt_regs *regs)
{
- return -ENODEV;
+ return false;
}
#endif
-int aarch32_break_handler(struct pt_regs *regs);
-
-void debug_traps_init(void);
+bool try_handle_aarch32_break(struct pt_regs *regs);
-#endif /* __ASSEMBLY */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_DEBUG_MONITORS_H */
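
The renamed helpers now return a plain handled/not-handled bool instead of an error code. Illustrative caller in the breakpoint path; the surrounding function and fallback are assumptions, not part of the patch:

static void example_compat_brk_handler(struct pt_regs *regs)
{
	if (try_handle_aarch32_break(regs))
		return;		/* consumed as an AArch32 BKPT/UDF */

	/* Not a recognised compat breakpoint: report it as a trap. */
	arm64_force_sig_fault(SIGTRAP, TRAP_BRKPT,
			      instruction_pointer(regs), "compat break");
}
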
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index bcd5622aa096..e8a9783235cb 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -45,7 +45,7 @@ void arch_efi_call_virt_teardown(void);
* switching to the EFI runtime stack.
*/
#define current_in_efi() \
- (!preemptible() && efi_rt_stack_top != NULL && \
+ (efi_rt_stack_top != NULL && \
on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1))
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
@@ -126,21 +126,14 @@ static inline void efi_set_pgd(struct mm_struct *mm)
if (mm != current->active_mm) {
/*
* Update the current thread's saved ttbr0 since it is
- * restored as part of a return from exception. Enable
- * access to the valid TTBR0_EL1 and invoke the errata
- * workaround directly since there is no return from
- * exception when invoking the EFI run-time services.
+ * restored as part of a return from exception.
*/
update_saved_ttbr0(current, mm);
- uaccess_ttbr0_enable();
- post_ttbr_update_workaround();
} else {
/*
- * Defer the switch to the current thread's TTBR0_EL1
- * until uaccess_enable(). Restore the current
- * thread's saved ttbr0 corresponding to its active_mm
+ * Restore the current thread's saved ttbr0
+ * corresponding to its active_mm
*/
- uaccess_ttbr0_disable();
update_saved_ttbr0(current, current->active_mm);
}
}
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 85ef966c08cd..587507a9980e 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -7,7 +7,7 @@
#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#error Assembly-only header
#endif
@@ -16,6 +16,56 @@
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>
+.macro init_el2_hcr val
+ mov_q x0, \val
+
+ /*
+ * Compliant CPUs advertise their VHE-onlyness with
+ * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it
+ * can reset into an UNKNOWN state and might not read as 1 until it has
+ * been initialized explicitly.
+ * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H
+ * indicating whether the CPU is running in E2H mode.
+ */
+ mrs_s x1, SYS_ID_AA64MMFR4_EL1
+ sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
+ cmp x1, #0
+ b.lt .LnE2H0_\@
+
+ /*
+ * Unfortunately, HCR_EL2.E2H can be RES1 even if not advertised
+ * as such via ID_AA64MMFR4_EL1.E2H0:
+ *
+ * - Fruity CPUs predate the !FEAT_E2H0 relaxation, and seem to
+ * have HCR_EL2.E2H implemented as RAO/WI.
+ *
+ * - On CPUs that lack FEAT_FGT, a hypervisor can't trap guest
+ * reads of ID_AA64MMFR4_EL1 to advertise !FEAT_E2H0. NV
+ * guests on these hosts can write to HCR_EL2.E2H without
+ * trapping to the hypervisor, but these writes have no
+ * functional effect.
+ *
+ * Handle both cases by checking for an essential VHE property
+ * (system register remapping) to decide whether we're
+ * effectively VHE-only or not.
+ */
+ msr_hcr_el2 x0 // Setup HCR_EL2 as nVHE
+ mov x1, #1 // Write something to FAR_EL1
+ msr far_el1, x1
+ isb
+ mov x1, #2 // Try to overwrite it via FAR_EL2
+ msr far_el2, x1
+ isb
+ mrs x1, far_el1 // If we see the latest write in FAR_EL1,
+ cmp x1, #2 // we can safely assume we are VHE only.
+ b.ne .LnVHE_\@ // Otherwise, we know that nVHE works.
+
+.LnE2H0_\@:
+ orr x0, x0, #HCR_E2H
+ msr_hcr_el2 x0
+.LnVHE_\@:
+.endm
+
.macro __init_el2_sctlr
mov_q x0, INIT_SCTLR_EL2_MMU_OFF
msr sctlr_el2, x0
@@ -26,14 +76,24 @@
mrs x0, id_aa64mmfr1_el1
ubfx x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
cbz x0, .Lskip_hcrx_\@
- mov_q x0, HCRX_HOST_FLAGS
+ mov_q x0, (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
/* Enable GCS if supported */
mrs_s x1, SYS_ID_AA64PFR1_EL1
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
- cbz x1, .Lset_hcrx_\@
+ cbz x1, .Lskip_gcs_hcrx_\@
orr x0, x0, #HCRX_EL2_GCSEn
+.Lskip_gcs_hcrx_\@:
+ /* Enable LS64, LS64_V if supported */
+ mrs_s x1, SYS_ID_AA64ISAR1_EL1
+ ubfx x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
+ cbz x1, .Lset_hcrx_\@
+ orr x0, x0, #HCRX_EL2_EnALS
+ cmp x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
+ b.lt .Lset_hcrx_\@
+ orr x0, x0, #HCRX_EL2_EnASR
+
.Lset_hcrx_\@:
msr_s SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
@@ -65,6 +125,14 @@
msr cntvoff_el2, xzr // Clear virtual offset
.endm
+/* Branch to skip_label if SPE version is less than given version */
+.macro __spe_vers_imp skip_label, version, tmp
+ mrs \tmp, id_aa64dfr0_el1
+ ubfx \tmp, \tmp, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
+ cmp \tmp, \version
+ b.lt \skip_label
+.endm
+
.macro __init_el2_debug
mrs x1, id_aa64dfr0_el1
ubfx x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
@@ -77,8 +145,7 @@
csel x2, xzr, x0, eq // all PMU counters from EL1
/* Statistical profiling */
- ubfx x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
- cbz x0, .Lskip_spe_\@ // Skip if SPE not present
+ __spe_vers_imp .Lskip_spe_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x0 // Skip if SPE not present
mrs_s x0, SYS_PMBIDR_EL1 // If SPE available at EL2,
and x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
@@ -87,7 +154,7 @@
1 << PMSCR_EL2_PA_SHIFT)
msr_s SYS_PMSCR_EL2, x0 // addresses and physical counter
.Lskip_spe_el2_\@:
- mov x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ mov x0, #MDCR_EL2_E2PB_MASK
orr x2, x2, x0 // If we don't have VHE, then
// use EL1&0 translation.
@@ -100,7 +167,7 @@
and x0, x0, TRBIDR_EL1_P
cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
- mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+ mov x0, #MDCR_EL2_E2TB_MASK
orr x2, x2, x0 // allow the EL1&0 translation
// to own it.
@@ -139,6 +206,51 @@
.Lskip_gicv3_\@:
.endm
+/* GICv5 system register access */
+.macro __init_el2_gicv5
+ mrs_s x0, SYS_ID_AA64PFR2_EL1
+ ubfx x0, x0, #ID_AA64PFR2_EL1_GCIE_SHIFT, #4
+ cbz x0, .Lskip_gicv5_\@
+
+ mov x0, #(ICH_HFGITR_EL2_GICRCDNMIA | \
+ ICH_HFGITR_EL2_GICRCDIA | \
+ ICH_HFGITR_EL2_GICCDDI | \
+ ICH_HFGITR_EL2_GICCDEOI | \
+ ICH_HFGITR_EL2_GICCDHM | \
+ ICH_HFGITR_EL2_GICCDRCFG | \
+ ICH_HFGITR_EL2_GICCDPEND | \
+ ICH_HFGITR_EL2_GICCDAFF | \
+ ICH_HFGITR_EL2_GICCDPRI | \
+ ICH_HFGITR_EL2_GICCDDIS | \
+ ICH_HFGITR_EL2_GICCDEN)
+ msr_s SYS_ICH_HFGITR_EL2, x0 // Disable instruction traps
+ mov_q x0, (ICH_HFGRTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_PENDRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_ENABLERn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PPI_HMRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_IAFFIDR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_ICSR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_PCR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_HPPIR_EL1 | \
+ ICH_HFGRTR_EL2_ICC_CR0_EL1 | \
+ ICH_HFGRTR_EL2_ICC_IDRn_EL1 | \
+ ICH_HFGRTR_EL2_ICC_APR_EL1)
+ msr_s SYS_ICH_HFGRTR_EL2, x0 // Disable reg read traps
+ mov_q x0, (ICH_HFGWTR_EL2_ICC_PPI_ACTIVERn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_PRIORITYRn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_PENDRn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PPI_ENABLERn_EL1 | \
+ ICH_HFGWTR_EL2_ICC_ICSR_EL1 | \
+ ICH_HFGWTR_EL2_ICC_PCR_EL1 | \
+ ICH_HFGWTR_EL2_ICC_CR0_EL1 | \
+ ICH_HFGWTR_EL2_ICC_APR_EL1)
+ msr_s SYS_ICH_HFGWTR_EL2, x0 // Disable reg write traps
+ mov x0, #(ICH_VCTLR_EL2_En)
+ msr_s SYS_ICH_VCTLR_EL2, x0 // Enable vHPPI selection
+.Lskip_gicv5_\@:
+.endm
+
.macro __init_el2_hstr
msr hstr_el2, xzr // Disable CP15 traps to EL2
.endm
@@ -154,7 +266,7 @@
/* Coprocessor traps */
.macro __init_el2_cptr
__check_hvhe .LnVHE_\@, x1
- mov x0, #CPACR_ELx_FPEN
+ mov x0, #CPACR_EL1_FPEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
.LnVHE_\@:
@@ -163,6 +275,28 @@
.Lskip_set_cptr_\@:
.endm
+/*
+ * Configure BRBE to permit recording cycle counts and branch mispredicts.
+ *
+ * At any EL, to record cycle counts BRBE requires that both BRBCR_EL2.CC=1 and
+ * BRBCR_EL1.CC=1.
+ *
+ * At any EL, to record branch mispredicts BRBE requires that both
+ * BRBCR_EL2.MPRED=1 and BRBCR_EL1.MPRED=1.
+ *
+ * Set {CC,MPRED} in BRBCR_EL2 in case nVHE mode is used and we are
+ * executing in EL1.
+ */
+.macro __init_el2_brbe
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_\@
+
+ mov_q x0, BRBCR_ELx_CC | BRBCR_ELx_MPRED
+ msr_s SYS_BRBCR_EL2, x0
+.Lskip_brbe_\@:
+.endm
+
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -170,34 +304,76 @@
cbz x1, .Lskip_fgt_\@
mov x0, xzr
- mrs x1, id_aa64dfr0_el1
- ubfx x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
- cmp x1, #3
- b.lt .Lskip_spe_fgt_\@
+ mov x2, xzr
+ /* If SPEv1p2 is implemented, */
+ __spe_vers_imp .Lskip_spe_fgt_\@, #ID_AA64DFR0_EL1_PMSVer_V1P2, x1
/* Disable PMSNEVFR_EL1 read and write traps */
- orr x0, x0, #(1 << 62)
+ orr x0, x0, #HDFGRTR_EL2_nPMSNEVFR_EL1_MASK
+ orr x2, x2, #HDFGWTR_EL2_nPMSNEVFR_EL1_MASK
.Lskip_spe_fgt_\@:
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_fgt_\@
+
+ /*
+ * Disable read traps for the following registers
+ *
+ * [BRBSRC|BRBTGT|RBINF]_EL1
+ * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+ */
+ orr x0, x0, #HDFGRTR_EL2_nBRBDATA_MASK
+
+ /*
+ * Disable write traps for the following registers
+ *
+ * [BRBSRCINJ|BRBTGTINJ|BRBINFINJ|BRBTS]_EL1
+ */
+ orr x2, x2, #HDFGWTR_EL2_nBRBDATA_MASK
+
+ /* Disable read and write traps for [BRBCR|BRBFCR]_EL1 */
+ orr x0, x0, #HDFGRTR_EL2_nBRBCTL_MASK
+ orr x2, x2, #HDFGWTR_EL2_nBRBCTL_MASK
+
+ /* Disable read traps for BRBIDR_EL1 */
+ orr x0, x0, #HDFGRTR_EL2_nBRBIDR_MASK
+
+.Lskip_brbe_fgt_\@:
+
+.Lset_debug_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
- msr_s SYS_HDFGWTR_EL2, x0
+ msr_s SYS_HDFGWTR_EL2, x2
mov x0, xzr
+ mov x2, xzr
+
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_BRBE_SHIFT, #4
+ cbz x1, .Lskip_brbe_insn_fgt_\@
+
+ /* Disable traps for BRBIALL instruction */
+ orr x2, x2, #HFGITR_EL2_nBRBIALL_MASK
+
+ /* Disable traps for BRBINJ instruction */
+ orr x2, x2, #HFGITR_EL2_nBRBINJ_MASK
+
+.Lskip_brbe_insn_fgt_\@:
mrs x1, id_aa64pfr1_el1
ubfx x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
- cbz x1, .Lskip_debug_fgt_\@
+ cbz x1, .Lskip_sme_fgt_\@
/* Disable nVHE traps of TPIDR2 and SMPRI */
- orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
- orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
+ orr x0, x0, #HFGRTR_EL2_nSMPRI_EL1_MASK
+ orr x0, x0, #HFGRTR_EL2_nTPIDR2_EL0_MASK
-.Lskip_debug_fgt_\@:
+.Lskip_sme_fgt_\@:
mrs_s x1, SYS_ID_AA64MMFR3_EL1
ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
cbz x1, .Lskip_pie_fgt_\@
/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
- orr x0, x0, #HFGxTR_EL2_nPIR_EL1
- orr x0, x0, #HFGxTR_EL2_nPIRE0_EL1
+ orr x0, x0, #HFGRTR_EL2_nPIR_EL1
+ orr x0, x0, #HFGRTR_EL2_nPIRE0_EL1
.Lskip_pie_fgt_\@:
mrs_s x1, SYS_ID_AA64MMFR3_EL1
@@ -205,22 +381,24 @@
cbz x1, .Lskip_poe_fgt_\@
/* Disable trapping of POR_EL0 */
- orr x0, x0, #HFGxTR_EL2_nPOR_EL0
+ orr x0, x0, #HFGRTR_EL2_nPOR_EL0
.Lskip_poe_fgt_\@:
/* GCS depends on PIE so we don't check it if PIE is absent */
mrs_s x1, SYS_ID_AA64PFR1_EL1
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
- cbz x1, .Lset_fgt_\@
+ cbz x1, .Lskip_gce_fgt_\@
/* Disable traps of access to GCS registers at EL0 and EL1 */
- orr x0, x0, #HFGxTR_EL2_nGCS_EL1_MASK
- orr x0, x0, #HFGxTR_EL2_nGCS_EL0_MASK
+ orr x0, x0, #HFGRTR_EL2_nGCS_EL1_MASK
+ orr x0, x0, #HFGRTR_EL2_nGCS_EL0_MASK
+
+.Lskip_gce_fgt_\@:
.Lset_fgt_\@:
msr_s SYS_HFGRTR_EL2, x0
msr_s SYS_HFGWTR_EL2, x0
- msr_s SYS_HFGITR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, x2
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
ubfx x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
@@ -233,33 +411,39 @@
.Lskip_fgt_\@:
.endm
-.macro __init_el2_gcs
- mrs_s x1, SYS_ID_AA64PFR1_EL1
- ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
- cbz x1, .Lskip_gcs_\@
-
- /* Ensure GCS is not enabled when we start trying to do BLs */
- msr_s SYS_GCSCR_EL1, xzr
- msr_s SYS_GCSCRE0_EL1, xzr
-.Lskip_gcs_\@:
-.endm
-
-.macro __init_el2_nvhe_prepare_eret
- mov x0, #INIT_PSTATE_EL1
- msr spsr_el2, x0
-.endm
+.macro __init_el2_fgt2
+ mrs x1, id_aa64mmfr0_el1
+ ubfx x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
+ cmp x1, #ID_AA64MMFR0_EL1_FGT_FGT2
+ b.lt .Lskip_fgt2_\@
-.macro __init_el2_mpam
- /* Memory Partitioning And Monitoring: disable EL2 traps */
- mrs x1, id_aa64pfr0_el1
- ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4
- cbz x0, .Lskip_mpam_\@ // skip if no MPAM
- msr_s SYS_MPAM2_EL2, xzr // use the default partition
- // and disable lower traps
- mrs_s x0, SYS_MPAMIDR_EL1
- tbz x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@ // skip if no MPAMHCR reg
- msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2
-.Lskip_mpam_\@:
+ mov x0, xzr
+ mrs x1, id_aa64dfr0_el1
+ ubfx x1, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
+ cmp x1, #ID_AA64DFR0_EL1_PMUVer_V3P9
+ b.lt .Lskip_pmuv3p9_\@
+
+ orr x0, x0, #HDFGRTR2_EL2_nPMICNTR_EL0
+ orr x0, x0, #HDFGRTR2_EL2_nPMICFILTR_EL0
+ orr x0, x0, #HDFGRTR2_EL2_nPMUACR_EL1
+.Lskip_pmuv3p9_\@:
+ /* If SPE is implemented, */
+ __spe_vers_imp .Lskip_spefds_\@, ID_AA64DFR0_EL1_PMSVer_IMP, x1
+ /* we can read PMSIDR and */
+ mrs_s x1, SYS_PMSIDR_EL1
+ and x1, x1, #PMSIDR_EL1_FDS
+ /* if FEAT_SPE_FDS is implemented, */
+ cbz x1, .Lskip_spefds_\@
+ /* disable traps of PMSDSFR to EL2. */
+ orr x0, x0, #HDFGRTR2_EL2_nPMSDSFR_EL1
+
+.Lskip_spefds_\@:
+ msr_s SYS_HDFGRTR2_EL2, x0
+ msr_s SYS_HDFGWTR2_EL2, x0
+ msr_s SYS_HFGRTR2_EL2, xzr
+ msr_s SYS_HFGWTR2_EL2, xzr
+ msr_s SYS_HFGITR2_EL2, xzr
+.Lskip_fgt2_\@:
.endm
/**
@@ -275,15 +459,16 @@
__init_el2_hcrx
__init_el2_timers
__init_el2_debug
+ __init_el2_brbe
__init_el2_lor
__init_el2_stage2
__init_el2_gicv3
+ __init_el2_gicv5
__init_el2_hstr
- __init_el2_mpam
__init_el2_nvhe_idregs
__init_el2_cptr
__init_el2_fgt
- __init_el2_gcs
+ __init_el2_fgt2
.endm
#ifndef __KVM_NVHE_HYPERVISOR__
@@ -325,6 +510,24 @@
#endif
.macro finalise_el2_state
+ check_override id_aa64pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT, .Linit_mpam_\@, .Lskip_mpam_\@, x1, x2
+
+.Linit_mpam_\@:
+ mov x0, #MPAM2_EL2_EnMPAMSM_MASK
+ msr_s SYS_MPAM2_EL2, x0 // use the default partition,
+ // and disable lower traps
+ mrs_s x0, SYS_MPAMIDR_EL1
+ tbz x0, #MPAMIDR_EL1_HAS_HCR_SHIFT, .Lskip_mpam_\@ // skip if no MPAMHCR reg
+ msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2
+
+.Lskip_mpam_\@:
+ check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
+
+.Linit_gcs_\@:
+ msr_s SYS_GCSCR_EL1, xzr
+ msr_s SYS_GCSCRE0_EL1, xzr
+
+.Lskip_gcs_\@:
check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
.Linit_sve_\@: /* SVE register access */
@@ -332,7 +535,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SVE traps
- orr x0, x0, #CPACR_ELx_ZEN
+ orr x0, x0, #CPACR_EL1_ZEN
msr cpacr_el1, x0
b .Lskip_set_cptr_\@
@@ -353,7 +556,7 @@
// (h)VHE case
mrs x0, cpacr_el1 // Disable SME traps
- orr x0, x0, #CPACR_ELx_SMEN
+ orr x0, x0, #CPACR_EL1_SMEN
msr cpacr_el1, x0
b .Lskip_set_cptr_sme_\@
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3f93f4eef953..d2779d604c7b 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -133,7 +133,7 @@
#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3)
#endif /* CONFIG_ARM64_FORCE_52BIT */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <uapi/linux/elf.h>
#include <linux/bug.h>
@@ -293,6 +293,6 @@ static inline int arch_check_elf(void *ehdr, bool has_interp,
return 0;
}
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/entry-common.h b/arch/arm64/include/asm/entry-common.h
new file mode 100644
index 000000000000..cab8cd78f693
--- /dev/null
+++ b/arch/arm64/include/asm/entry-common.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_ARM64_ENTRY_COMMON_H
+#define _ASM_ARM64_ENTRY_COMMON_H
+
+#include <linux/thread_info.h>
+
+#include <asm/cpufeature.h>
+#include <asm/daifflags.h>
+#include <asm/fpsimd.h>
+#include <asm/mte.h>
+#include <asm/stacktrace.h>
+
+#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_MTE_ASYNC_FAULT | _TIF_FOREIGN_FPSTATE)
+
+static __always_inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ if (ti_work & _TIF_MTE_ASYNC_FAULT) {
+ clear_thread_flag(TIF_MTE_ASYNC_FAULT);
+ send_sig_fault(SIGSEGV, SEGV_MTEAERR, (void __user *)NULL, current);
+ }
+
+ if (ti_work & _TIF_FOREIGN_FPSTATE)
+ fpsimd_restore_current_state();
+}
+
+#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
+
+static inline bool arch_irqentry_exit_need_resched(void)
+{
+ /*
+ * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+ * priority masking is used the GIC irqchip driver will clear DAIF.IF
+ * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+ * DAIF we must have handled an NMI, so skip preemption.
+ */
+ if (system_uses_irq_prio_masking() && read_sysreg(daif))
+ return false;
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (!system_capabilities_finalized())
+ return false;
+
+ return true;
+}
+
+#define arch_irqentry_exit_need_resched arch_irqentry_exit_need_resched
+
+#endif /* _ASM_ARM64_ENTRY_COMMON_H */
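
arch_irqentry_exit_need_resched() above gates preemption on IRQ exit; a simplified sketch of the caller side, assuming the shape of the generic irqentry exit path (not the actual generic entry code):

/* Simplified sketch: preemption on IRQ exit only happens when the arch hook agrees. */
static void irqentry_exit_cond_resched_sketch(void)
{
	if (!preempt_count() && need_resched() && arch_irqentry_exit_need_resched())
		preempt_schedule_irq();
}
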
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d1b1a33f9a8b..7e86d400864e 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -20,7 +20,8 @@
#define ESR_ELx_EC_FP_ASIMD UL(0x07)
#define ESR_ELx_EC_CP10_ID UL(0x08) /* EL2 only */
#define ESR_ELx_EC_PAC UL(0x09) /* EL2 and above */
-/* Unallocated EC: 0x0A - 0x0B */
+#define ESR_ELx_EC_OTHER UL(0x0A)
+/* Unallocated EC: 0x0B */
#define ESR_ELx_EC_CP14_64 UL(0x0C)
#define ESR_ELx_EC_BTI UL(0x0D)
#define ESR_ELx_EC_ILL UL(0x0E)
@@ -99,6 +100,8 @@
#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)
/* Shared ISS field definitions for Data/Instruction aborts */
+#define ESR_ELx_VNCR_SHIFT (13)
+#define ESR_ELx_VNCR (UL(1) << ESR_ELx_VNCR_SHIFT)
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
#define ESR_ELx_FnV_SHIFT (10)
@@ -121,6 +124,16 @@
#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
+#define ESR_ELx_FSC_EXCL_ATOMIC (0x35)
+#define ESR_ELx_FSC_ADDRSZ (0x00)
+
+/*
+ * Annoyingly, the negative levels for Address size faults aren't laid out
+ * contiguously (or in the desired order)
+ */
+#define ESR_ELx_FSC_ADDRSZ_nL(n) ((n) == -1 ? 0x25 : 0x2C)
+#define ESR_ELx_FSC_ADDRSZ_L(n) ((n) < 0 ? ESR_ELx_FSC_ADDRSZ_nL(n) : \
+ (ESR_ELx_FSC_ADDRSZ + (n)))
/* Status codes for individual page table levels */
#define ESR_ELx_FSC_ACCESS_L(n) (ESR_ELx_FSC_ACCESS + (n))
@@ -161,8 +174,6 @@
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
/* ISS field definitions for exceptions taken in to Hyp */
-#define ESR_ELx_FSC_ADDRSZ (0x00)
-#define ESR_ELx_FSC_ADDRSZ_L(n) (ESR_ELx_FSC_ADDRSZ + (n))
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
@@ -174,6 +185,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((UL(1) << 16) - 1)
+/* ISS definitions for LD64B/ST64B/{T,P}SBCSYNC instructions */
+#define ESR_ELx_ISS_OTHER_ST64BV (0)
+#define ESR_ELx_ISS_OTHER_ST64BV0 (1)
+#define ESR_ELx_ISS_OTHER_LDST64B (2)
+#define ESR_ELx_ISS_OTHER_TSBCSYNC (3)
+#define ESR_ELx_ISS_OTHER_PSBCSYNC (4)
+
#define DISR_EL1_IDS (UL(1) << 24)
/*
* DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
@@ -371,12 +389,14 @@
/*
* ISS values for SME traps
*/
+#define ESR_ELx_SME_ISS_SMTC_MASK GENMASK(2, 0)
+#define ESR_ELx_SME_ISS_SMTC(esr) ((esr) & ESR_ELx_SME_ISS_SMTC_MASK)
-#define ESR_ELx_SME_ISS_SME_DISABLED 0
-#define ESR_ELx_SME_ISS_ILL 1
-#define ESR_ELx_SME_ISS_SM_DISABLED 2
-#define ESR_ELx_SME_ISS_ZA_DISABLED 3
-#define ESR_ELx_SME_ISS_ZT_DISABLED 4
+#define ESR_ELx_SME_ISS_SMTC_SME_DISABLED 0
+#define ESR_ELx_SME_ISS_SMTC_ILL 1
+#define ESR_ELx_SME_ISS_SMTC_SM_DISABLED 2
+#define ESR_ELx_SME_ISS_SMTC_ZA_DISABLED 3
+#define ESR_ELx_SME_ISS_SMTC_ZT_DISABLED 4
/* ISS field definitions for MOPS exceptions */
#define ESR_ELx_MOPS_ISS_MEM_INST (UL(1) << 24)
@@ -412,7 +432,7 @@
#define ESR_ELx_IT_GCSPOPCX 6
#define ESR_ELx_IT_GCSPOPX 7
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/types.h>
static inline unsigned long esr_brk_comment(unsigned long esr)
@@ -433,6 +453,11 @@ static inline bool esr_is_cfi_brk(unsigned long esr)
(esr_brk_comment(esr) & ~CFI_BRK_IMM_MASK) == CFI_BRK_IMM_BASE;
}
+static inline bool esr_is_ubsan_brk(unsigned long esr)
+{
+ return (esr_brk_comment(esr) & ~UBSAN_BRK_MASK) == UBSAN_BRK_IMM;
+}
+
static inline bool esr_fsc_is_translation_fault(unsigned long esr)
{
esr = esr & ESR_ELx_FSC;
@@ -464,6 +489,46 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
(esr == ESR_ELx_FSC_ACCESS_L(0));
}
+static inline bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return esr == ESR_ELx_FSC_EXCL_ATOMIC;
+}
+
+static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
+{
+ esr &= ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_ADDRSZ_L(3)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(2)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(1)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(0)) ||
+ (esr == ESR_ELx_FSC_ADDRSZ_L(-1));
+}
+
+static inline bool esr_fsc_is_sea_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SEA_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SEA_TTW(-1));
+}
+
+static inline bool esr_fsc_is_secc_ttw(unsigned long esr)
+{
+ esr = esr & ESR_ELx_FSC;
+
+ return (esr == ESR_ELx_FSC_SECC_TTW(3)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(2)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(1)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(0)) ||
+ (esr == ESR_ELx_FSC_SECC_TTW(-1));
+}
+
/* Indicate whether ESR.EC==0x1A is for an ERETAx instruction */
static inline bool esr_iss_is_eretax(unsigned long esr)
{
@@ -477,6 +542,6 @@ static inline bool esr_iss_is_eretab(unsigned long esr)
}
const char *esr_get_class_string(unsigned long esr);
-#endif /* __ASSEMBLY */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_ESR_H */
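
The non-contiguous level encoding handled by ESR_ELx_FSC_ADDRSZ_L() is easy to misread; a compile-time illustration, assuming <asm/esr.h> and <linux/build_bug.h> are available:

/* Values follow directly from the macros above: levels 0..3 are contiguous, level -1 is not. */
static_assert(ESR_ELx_FSC_ADDRSZ_L(0)  == 0x00);
static_assert(ESR_ELx_FSC_ADDRSZ_L(3)  == 0x03);
static_assert(ESR_ELx_FSC_ADDRSZ_L(-1) == 0x25);
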
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index d48fc16584cd..a2da3cb21c24 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -59,8 +59,20 @@ void do_el0_bti(struct pt_regs *regs);
void do_el1_bti(struct pt_regs *regs, unsigned long esr);
void do_el0_gcs(struct pt_regs *regs, unsigned long esr);
void do_el1_gcs(struct pt_regs *regs, unsigned long esr);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+void do_breakpoint(unsigned long esr, struct pt_regs *regs);
+void do_watchpoint(unsigned long addr, unsigned long esr,
struct pt_regs *regs);
+#else
+static inline void do_breakpoint(unsigned long esr, struct pt_regs *regs) {}
+static inline void do_watchpoint(unsigned long addr, unsigned long esr,
+ struct pt_regs *regs) {}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+void do_el0_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el1_softstep(unsigned long esr, struct pt_regs *regs);
+void do_el0_brk64(unsigned long esr, struct pt_regs *regs);
+void do_el1_brk64(unsigned long esr, struct pt_regs *regs);
+void do_bkpt32(unsigned long esr, struct pt_regs *regs);
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs);
void do_sve_acc(unsigned long esr, struct pt_regs *regs);
void do_sme_acc(unsigned long esr, struct pt_regs *regs);
@@ -77,7 +89,6 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
void do_el0_mops(struct pt_regs *regs, unsigned long esr);
void do_el1_mops(struct pt_regs *regs, unsigned long esr);
void do_serror(struct pt_regs *regs, unsigned long esr);
-void do_signal(struct pt_regs *regs);
void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h
index 72b0e71cc3de..9dc39612bdf5 100644
--- a/arch/arm64/include/asm/extable.h
+++ b/arch/arm64/include/asm/extable.h
@@ -33,6 +33,8 @@ do { \
(b)->data = (tmp).data; \
} while (0)
+bool insn_may_access_user(unsigned long addr, unsigned long esr);
+
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);
@@ -45,5 +47,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
}
#endif /* !CONFIG_BPF_JIT */
-bool fixup_exception(struct pt_regs *regs);
+bool fixup_exception(struct pt_regs *regs, unsigned long esr);
#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 87e307804b99..65555284446e 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -15,7 +15,7 @@
#ifndef _ASM_ARM64_FIXMAP_H
#define _ASM_ARM64_FIXMAP_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/sizes.h>
@@ -48,6 +48,12 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0,
+#ifdef CONFIG_KVM
+ /* One slot per CPU, mapping the guest's VNCR page at EL2. */
+ FIX_VNCR_END,
+ FIX_VNCR = FIX_VNCR_END + NR_CPUS,
+#endif
+
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
@@ -111,5 +117,5 @@ extern void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t pr
#include <asm-generic/fixmap.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* _ASM_ARM64_FIXMAP_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index f2a84efc3618..1d2e33559bd5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -6,12 +6,13 @@
#define __ASM_FP_H
#include <asm/errno.h>
+#include <asm/percpu.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitmap.h>
#include <linux/build_bug.h>
@@ -76,11 +77,9 @@ extern void fpsimd_load_state(struct user_fpsimd_state *state);
extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);
-extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
-extern void fpsimd_kvm_prepare(void);
struct cpu_fp_state {
struct user_fpsimd_state *st;
@@ -94,9 +93,12 @@ struct cpu_fp_state {
enum fp_type to_save;
};
+DECLARE_PER_CPU(struct cpu_fp_state, fpsimd_last_state);
+
extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);
extern void fpsimd_flush_task_state(struct task_struct *target);
+extern void fpsimd_save_and_flush_current_state(void);
extern void fpsimd_save_and_flush_cpu_state(void);
static inline bool thread_sm_enabled(struct thread_struct *thread)
@@ -109,6 +111,8 @@ static inline bool thread_za_enabled(struct thread_struct *thread)
return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
}
+extern void task_smstop_sm(struct task_struct *task);
+
/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100
@@ -196,10 +200,8 @@ struct vl_info {
extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
-extern void fpsimd_sync_to_sve(struct task_struct *task);
-extern void fpsimd_force_sync_to_sve(struct task_struct *task);
-extern void sve_sync_to_fpsimd(struct task_struct *task);
-extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
+extern void fpsimd_sync_from_effective_state(struct task_struct *task);
+extern void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task);
extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
unsigned long vl, unsigned long flags);
@@ -293,14 +295,29 @@ static inline bool sve_vq_available(unsigned int vq)
return vq_available(ARM64_VEC_SVE, vq);
}
-size_t sve_state_size(struct task_struct const *task);
+static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl)
+{
+ unsigned int vl = max(sve_vl, sme_vl);
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+}
+
+/*
+ * Return how many bytes of memory are required to store the full SVE
+ * state for task, given task's currently configured vector length.
+ */
+static inline size_t sve_state_size(struct task_struct const *task)
+{
+ unsigned int sve_vl = task_get_sve_vl(task);
+ unsigned int sme_vl = task_get_sme_vl(task);
+ return __sve_state_size(sve_vl, sme_vl);
+}
#else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
-static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
-static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
+static inline void fpsimd_sync_from_effective_state(struct task_struct *task) { }
+static inline void fpsimd_sync_to_effective_state_zeropad(struct task_struct *task) { }
static inline int sve_max_virtualisable_vl(void)
{
@@ -334,6 +351,11 @@ static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
+static inline size_t __sve_state_size(unsigned int sve_vl, unsigned int sme_vl)
+{
+ return 0;
+}
+
static inline size_t sve_state_size(struct task_struct const *task)
{
return 0;
@@ -386,6 +408,16 @@ extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
extern void sme_suspend_exit(void);
+static inline size_t __sme_state_size(unsigned int sme_vl)
+{
+ size_t size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(sme_vl));
+
+ if (system_supports_sme2())
+ size += ZT_SIG_REG_SIZE;
+
+ return size;
+}
+
/*
* Return how many bytes of memory are required to store the full SME
* specific state for task, given task's currently configured vector
@@ -393,15 +425,7 @@ extern void sme_suspend_exit(void);
*/
static inline size_t sme_state_size(struct task_struct const *task)
{
- unsigned int vl = task_get_sme_vl(task);
- size_t size;
-
- size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
-
- if (system_supports_sme2())
- size += ZT_SIG_REG_SIZE;
-
- return size;
+ return __sme_state_size(task_get_sme_vl(task));
}
#else
@@ -422,6 +446,11 @@ static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
static inline void sme_suspend_exit(void) { }
+static inline size_t __sme_state_size(unsigned int sme_vl)
+{
+ return 0;
+}
+
static inline size_t sme_state_size(struct task_struct const *task)
{
return 0;
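
With sve_state_size() now an inline helper, callers can size per-task SVE buffers directly; a hedged sketch (hypothetical helper, kzalloc chosen for illustration):

/* Hypothetical helper: the size already covers max(SVE VL, SME streaming VL). */
static void *alloc_sve_state_sketch(struct task_struct *task)
{
	return kzalloc(sve_state_size(task), GFP_KERNEL);
}
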
diff --git a/arch/arm64/include/asm/fpu.h b/arch/arm64/include/asm/fpu.h
index 2ae50bdce59b..751e88a96734 100644
--- a/arch/arm64/include/asm/fpu.h
+++ b/arch/arm64/include/asm/fpu.h
@@ -6,10 +6,22 @@
#ifndef __ASM_FPU_H
#define __ASM_FPU_H
+#include <linux/preempt.h>
#include <asm/neon.h>
#define kernel_fpu_available() cpu_has_neon()
-#define kernel_fpu_begin() kernel_neon_begin()
-#define kernel_fpu_end() kernel_neon_end()
+
+static inline void kernel_fpu_begin(void)
+{
+ BUG_ON(!in_task());
+ preempt_disable();
+ kernel_neon_begin(NULL);
+}
+
+static inline void kernel_fpu_end(void)
+{
+ kernel_neon_end(NULL);
+ preempt_enable();
+}
#endif /* ! __ASM_FPU_H */
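
The reworked kernel_fpu_begin()/kernel_fpu_end() pair may only be used from task context and now handles preemption itself; a short sketch of the expected usage pattern:

static void fp_work_sketch(void)
{
	if (!kernel_fpu_available())
		return;

	kernel_fpu_begin();	/* task context only; disables preemption */
	/* ... FP/SIMD computation ... */
	kernel_fpu_end();
}
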
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5ccff4de7f09..1621c84f44b3 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -37,7 +37,7 @@
*/
#define ARCH_FTRACE_SHIFT_STACK_TRACER 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compat.h>
extern void _mcount(unsigned long);
@@ -52,6 +52,8 @@ extern unsigned long ftrace_graph_call;
extern void return_to_handler(void);
unsigned long ftrace_call_adjust(unsigned long addr);
+unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip);
+#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define HAVE_ARCH_FTRACE_REGS
@@ -129,6 +131,39 @@ ftrace_override_function_with_return(struct ftrace_regs *fregs)
arch_ftrace_regs(fregs)->pc = arch_ftrace_regs(fregs)->lr;
}
+static __always_inline unsigned long
+ftrace_regs_get_frame_pointer(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->fp;
+}
+
+static __always_inline unsigned long
+ftrace_regs_get_return_address(const struct ftrace_regs *fregs)
+{
+ return arch_ftrace_regs(fregs)->lr;
+}
+
+static __always_inline struct pt_regs *
+ftrace_partial_regs(const struct ftrace_regs *fregs, struct pt_regs *regs)
+{
+ struct __arch_ftrace_regs *afregs = arch_ftrace_regs(fregs);
+
+ memcpy(regs->regs, afregs->regs, sizeof(afregs->regs));
+ regs->sp = afregs->sp;
+ regs->pc = afregs->pc;
+ regs->regs[29] = afregs->fp;
+ regs->regs[30] = afregs->lr;
+ regs->pstate = PSR_MODE_EL1h;
+ return regs;
+}
+
+#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
+ (_regs)->pc = arch_ftrace_regs(fregs)->pc; \
+ (_regs)->regs[29] = arch_ftrace_regs(fregs)->fp; \
+ (_regs)->sp = arch_ftrace_regs(fregs)->sp; \
+ (_regs)->pstate = PSR_MODE_EL1h; \
+ } while (0)
+
int ftrace_regs_query_register_offset(const char *name);
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
@@ -182,27 +217,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
*/
return !strcmp(sym + 8, name);
}
-#endif /* ifndef __ASSEMBLY__ */
+#endif /* ifndef __ASSEMBLER__ */
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-struct fgraph_ret_regs {
- /* x0 - x7 */
- unsigned long regs[8];
-
- unsigned long fp;
- unsigned long __unused;
-};
-
-static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->regs[0];
-}
-
-static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
-{
- return ret_regs->fp;
-}
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer);
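
ftrace_partial_regs() gives callers that only have a struct ftrace_regs a pt_regs view without requiring a full register save. A hedged sketch of a tracer callback using it (handler name and printout are illustrative only):

static void trace_handler_sketch(struct ftrace_regs *fregs)
{
	struct pt_regs stack_regs;
	struct pt_regs *regs = ftrace_partial_regs(fregs, &stack_regs);

	/* Only the fields filled in above (argument regs, sp, pc, fp, lr, pstate) are meaningful. */
	pr_info("pc=%016lx lr=%016lx\n", regs->pc, regs->regs[30]);
}
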
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index bc06691d2062..d1d2ff9d323a 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -9,71 +9,292 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
+#include <asm/lsui.h>
#define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
-#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
-do { \
+#define LLSC_FUTEX_ATOMIC_OP(op, insn) \
+static __always_inline int \
+__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
unsigned int loops = FUTEX_MAX_LOOPS; \
+ int ret, oldval, newval; \
\
uaccess_enable_privileged(); \
- asm volatile( \
-" prfm pstl1strm, %2\n" \
-"1: ldxr %w1, %2\n" \
+ asm volatile("// __llsc_futex_atomic_" #op "\n" \
+" prfm pstl1strm, %[uaddr]\n" \
+"1: ldxr %w[oldval], %[uaddr]\n" \
insn "\n" \
-"2: stlxr %w0, %w3, %2\n" \
-" cbz %w0, 3f\n" \
-" sub %w4, %w4, %w0\n" \
-" cbnz %w4, 1b\n" \
-" mov %w0, %w6\n" \
+"2: stlxr %w[ret], %w[newval], %[uaddr]\n" \
+" cbz %w[ret], 3f\n" \
+" sub %w[loops], %w[loops], %w[ret]\n" \
+" cbnz %w[loops], 1b\n" \
+" mov %w[ret], %w[err]\n" \
"3:\n" \
" dmb ish\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \
- _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \
- : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \
- "+r" (loops) \
- : "r" (oparg), "Ir" (-EAGAIN) \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret]) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret]) \
+ : [ret] "=&r" (ret), [oldval] "=&r" (oldval), \
+ [uaddr] "+Q" (*uaddr), [newval] "=&r" (newval), \
+ [loops] "+r" (loops) \
+ : [oparg] "r" (oparg), [err] "Ir" (-EAGAIN) \
: "memory"); \
uaccess_disable_privileged(); \
-} while (0)
+ \
+ if (!ret) \
+ *oval = oldval; \
+ \
+ return ret; \
+}
+
+LLSC_FUTEX_ATOMIC_OP(add, "add %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(or, "orr %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(and, "and %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(eor, "eor %w[newval], %w[oldval], %w[oparg]")
+LLSC_FUTEX_ATOMIC_OP(set, "mov %w[newval], %w[oparg]")
+
+static __always_inline int
+__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ int ret = 0;
+ unsigned int loops = FUTEX_MAX_LOOPS;
+ u32 val, tmp;
+
+ uaccess_enable_privileged();
+ asm volatile("//__llsc_futex_cmpxchg\n"
+" prfm pstl1strm, %[uaddr]\n"
+"1: ldxr %w[curval], %[uaddr]\n"
+" eor %w[tmp], %w[curval], %w[oldval]\n"
+" cbnz %w[tmp], 4f\n"
+"2: stlxr %w[tmp], %w[newval], %[uaddr]\n"
+" cbz %w[tmp], 3f\n"
+" sub %w[loops], %w[loops], %w[tmp]\n"
+" cbnz %w[loops], 1b\n"
+" mov %w[ret], %w[err]\n"
+"3:\n"
+" dmb ish\n"
+"4:\n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w[ret])
+ _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w[ret])
+ : [ret] "+r" (ret), [curval] "=&r" (val),
+ [uaddr] "+Q" (*uaddr), [tmp] "=&r" (tmp),
+ [loops] "+r" (loops)
+ : [oldval] "r" (oldval), [newval] "r" (newval),
+ [err] "Ir" (-EAGAIN)
+ : "memory");
+ uaccess_disable_privileged();
+
+ if (!ret)
+ *oval = val;
+
+ return ret;
+}
+
+#ifdef CONFIG_ARM64_LSUI
+
+/*
+ * Wrap LSUI instructions with uaccess_ttbr0_enable()/disable(), as
+ * PAN toggling is not required.
+ */
+
+#define LSUI_FUTEX_ATOMIC_OP(op, asm_op) \
+static __always_inline int \
+__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
+ int ret = 0; \
+ int oldval; \
+ \
+ uaccess_ttbr0_enable(); \
+ \
+ asm volatile("// __lsui_futex_atomic_" #op "\n" \
+ __LSUI_PREAMBLE \
+"1: " #asm_op "al %w[oparg], %w[oldval], %[uaddr]\n" \
+"2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret]) \
+ : [ret] "+r" (ret), [uaddr] "+Q" (*uaddr), \
+ [oldval] "=r" (oldval) \
+ : [oparg] "r" (oparg) \
+ : "memory"); \
+ \
+ uaccess_ttbr0_disable(); \
+ \
+ if (!ret) \
+ *oval = oldval; \
+ return ret; \
+}
+
+LSUI_FUTEX_ATOMIC_OP(add, ldtadd)
+LSUI_FUTEX_ATOMIC_OP(or, ldtset)
+LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr)
+LSUI_FUTEX_ATOMIC_OP(set, swpt)
+
+static __always_inline int
+__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval)
+{
+ int ret = 0;
+
+ uaccess_ttbr0_enable();
+
+ asm volatile("// __lsui_cmpxchg64\n"
+ __LSUI_PREAMBLE
+"1: casalt %[oldval], %[newval], %[uaddr]\n"
+"2:\n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w[ret])
+ : [ret] "+r" (ret), [uaddr] "+Q" (*uaddr),
+ [oldval] "+r" (*oldval)
+ : [newval] "r" (newval)
+ : "memory");
+
+ uaccess_ttbr0_disable();
+
+ return ret;
+}
+
+static __always_inline int
+__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ u64 __user *uaddr64;
+ bool futex_pos, other_pos;
+ u32 other, orig_other;
+ union {
+ u32 futex[2];
+ u64 raw;
+ } oval64, orig64, nval64;
+
+ uaddr64 = (u64 __user *)PTR_ALIGN_DOWN(uaddr, sizeof(u64));
+ futex_pos = !IS_ALIGNED((unsigned long)uaddr, sizeof(u64));
+ other_pos = !futex_pos;
+
+ oval64.futex[futex_pos] = oldval;
+ if (get_user(oval64.futex[other_pos], (u32 __user *)uaddr64 + other_pos))
+ return -EFAULT;
+
+ orig64.raw = oval64.raw;
+
+ nval64.futex[futex_pos] = newval;
+ nval64.futex[other_pos] = oval64.futex[other_pos];
+
+ if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw))
+ return -EFAULT;
+
+ oldval = oval64.futex[futex_pos];
+ other = oval64.futex[other_pos];
+ orig_other = orig64.futex[other_pos];
+
+ if (other != orig_other)
+ return -EAGAIN;
+
+ *oval = oldval;
+
+ return 0;
+}
+
+static __always_inline int
+__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
+{
+ /*
+ * Undo the bitwise negation applied to the oparg passed from
+ * arch_futex_atomic_op_inuser() with FUTEX_OP_ANDN.
+ */
+ return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
+}
+
+static __always_inline int
+__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
+{
+ u32 oldval, newval, val;
+ int ret, i;
+
+ if (get_user(oldval, uaddr))
+ return -EFAULT;
+
+ /*
+	 * There are no LDTEOR/STTEOR instructions, so XOR is emulated with a cmpxchg loop.
+ */
+ for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
+ newval = oldval ^ oparg;
+
+ ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val);
+ switch (ret) {
+ case -EFAULT:
+ return ret;
+ case -EAGAIN:
+ continue;
+ }
+
+ if (val == oldval) {
+ *oval = val;
+ return 0;
+ }
+
+ oldval = val;
+ }
+
+ return -EAGAIN;
+}
+
+static __always_inline int
+__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ /*
+ * Callers of futex_atomic_cmpxchg_inatomic() already retry on
+ * -EAGAIN, no need for another loop of max retries.
+ */
+ return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
+}
+#endif /* CONFIG_ARM64_LSUI */
+
+
+#define FUTEX_ATOMIC_OP(op) \
+static __always_inline int \
+__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
+{ \
+ return __lsui_llsc_body(futex_atomic_##op, oparg, uaddr, oval); \
+}
+
+FUTEX_ATOMIC_OP(add)
+FUTEX_ATOMIC_OP(or)
+FUTEX_ATOMIC_OP(and)
+FUTEX_ATOMIC_OP(eor)
+FUTEX_ATOMIC_OP(set)
+
+static __always_inline int
+__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+ return __lsui_llsc_body(futex_cmpxchg, uaddr, oldval, newval, oval);
+}
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
- int oldval = 0, ret, tmp;
- u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
+ int ret;
+ u32 __user *uaddr;
if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT;
+ uaddr = __uaccess_mask_ptr(_uaddr);
+
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("mov %w3, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_set(oparg, uaddr, oval);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("add %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_add(oparg, uaddr, oval);
break;
case FUTEX_OP_OR:
- __futex_atomic_op("orr %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_or(oparg, uaddr, oval);
break;
case FUTEX_OP_ANDN:
- __futex_atomic_op("and %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, ~oparg);
+ ret = __futex_atomic_and(~oparg, uaddr, oval);
break;
case FUTEX_OP_XOR:
- __futex_atomic_op("eor %w3, %w1, %w5",
- ret, oldval, uaddr, tmp, oparg);
+ ret = __futex_atomic_eor(oparg, uaddr, oval);
break;
default:
ret = -ENOSYS;
}
- if (!ret)
- *oval = oldval;
-
return ret;
}
@@ -81,40 +302,14 @@ static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
- int ret = 0;
- unsigned int loops = FUTEX_MAX_LOOPS;
- u32 val, tmp;
u32 __user *uaddr;
if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT;
uaddr = __uaccess_mask_ptr(_uaddr);
- uaccess_enable_privileged();
- asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-" prfm pstl1strm, %2\n"
-"1: ldxr %w1, %2\n"
-" sub %w3, %w1, %w5\n"
-" cbnz %w3, 4f\n"
-"2: stlxr %w3, %w6, %2\n"
-" cbz %w3, 3f\n"
-" sub %w4, %w4, %w3\n"
-" cbnz %w4, 1b\n"
-" mov %w0, %w7\n"
-"3:\n"
-" dmb ish\n"
-"4:\n"
- _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0)
- _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0)
- : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
- : "r" (oldval), "r" (newval), "Ir" (-EAGAIN)
- : "memory");
- uaccess_disable_privileged();
- if (!ret)
- *uval = val;
-
- return ret;
+ return __futex_cmpxchg(uaddr, oldval, newval, uval);
}
#endif /* __ASM_FUTEX_H */
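
__lsui_cmpxchg32() above emulates the 32-bit compare-and-exchange with a single 64-bit CASALT on the naturally aligned doubleword containing the futex: the neighbouring 32-bit word is read up front, carried through the CAS unchanged, and any concurrent change to it is reported as -EAGAIN so the caller retries. A standalone sketch of the index arithmetic (plain userspace C, little-endian assumed, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t uaddr   = 0x1004;			/* 4-byte aligned futex address */
	uintptr_t uaddr64 = uaddr & ~(uintptr_t)7;	/* containing doubleword: 0x1000 */
	bool futex_pos = (uaddr & 7) != 0;		/* futex is the upper 32-bit word here */
	bool other_pos = !futex_pos;			/* neighbour is the lower word */

	printf("cas on %#lx: futex index %d, neighbour index %d\n",
	       (unsigned long)uaddr64, (int)futex_pos, (int)other_pos);
	return 0;
}
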
diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h
index f50660603ecf..8fa0707069e8 100644
--- a/arch/arm64/include/asm/gcs.h
+++ b/arch/arm64/include/asm/gcs.h
@@ -21,7 +21,7 @@ static inline void gcsstr(u64 *addr, u64 val)
register u64 *_addr __asm__ ("x0") = addr;
register long _val __asm__ ("x1") = val;
- /* GCSSTTR x1, x0 */
+ /* GCSSTTR x1, [x0] */
asm volatile(
".inst 0xd91f1c01\n"
:
@@ -58,7 +58,7 @@ static inline u64 gcsss2(void)
static inline bool task_gcs_el0_enabled(struct task_struct *task)
{
- return current->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
+ return task->thread.gcs_el0_mode & PR_SHADOW_STACK_ENABLE;
}
void gcs_set_el0_mode(struct task_struct *task);
@@ -81,6 +81,82 @@ static inline int gcs_check_locked(struct task_struct *task,
return 0;
}
+static inline int gcssttr(unsigned long __user *addr, unsigned long val)
+{
+ register unsigned long __user *_addr __asm__ ("x0") = addr;
+ register unsigned long _val __asm__ ("x1") = val;
+ int err = 0;
+
+ /* GCSSTTR x1, [x0] */
+ asm volatile(
+ "1: .inst 0xd91f1c01\n"
+ "2: \n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
+ : "+r" (err)
+ : "rZ" (_val), "r" (_addr)
+ : "memory");
+
+ return err;
+}
+
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+ int *err)
+{
+ int ret;
+
+ if (!access_ok((char __user *)addr, sizeof(u64))) {
+ *err = -EFAULT;
+ return;
+ }
+
+ uaccess_ttbr0_enable();
+ ret = gcssttr(addr, val);
+ if (ret != 0)
+ *err = ret;
+ uaccess_ttbr0_disable();
+}
+
+static inline void push_user_gcs(unsigned long val, int *err)
+{
+ u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+
+ gcspr -= sizeof(u64);
+ put_user_gcs(val, (unsigned long __user *)gcspr, err);
+ if (!*err)
+ write_sysreg_s(gcspr, SYS_GCSPR_EL0);
+}
+
+/*
+ * Unlike put/push_user_gcs() above, get/pop_user_gcs() doesn't
+ * validate the GCS permission is set on the page being read. This
+ * differs from how the hardware works when it consumes data stored at
+ * GCSPR. Callers should ensure this is acceptable.
+ */
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+ unsigned long ret;
+ u64 load = 0;
+
+	/* Ensure previous GCS operations are visible before we read the page */
+ gcsb_dsync();
+ ret = copy_from_user(&load, addr, sizeof(load));
+ if (ret != 0)
+ *err = ret;
+ return load;
+}
+
+static inline u64 pop_user_gcs(int *err)
+{
+ u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
+ u64 read_val;
+
+ read_val = get_user_gcs((__force unsigned long __user *)gcspr, err);
+ if (!*err)
+ write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);
+
+ return read_val;
+}
+
#else
static inline bool task_gcs_el0_enabled(struct task_struct *task)
@@ -91,6 +167,10 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
static inline void gcs_set_el0_mode(struct task_struct *task) { }
static inline void gcs_free(struct task_struct *task) { }
static inline void gcs_preserve_current_state(void) { }
+static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
+ int *err) { }
+static inline void push_user_gcs(unsigned long val, int *err) { }
+
static inline unsigned long gcs_alloc_thread_stack(struct task_struct *tsk,
const struct kernel_clone_args *args)
{
@@ -101,6 +181,15 @@ static inline int gcs_check_locked(struct task_struct *task,
{
return 0;
}
+static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
+{
+ *err = -EFAULT;
+ return 0;
+}
+static inline u64 pop_user_gcs(int *err)
+{
+ return 0;
+}
#endif
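
put_user_gcs()/push_user_gcs() report failures through an error pointer rather than a return value. A hedged sketch of pushing a value onto the current task's userspace shadow stack (helper name is illustrative):

static int push_gcs_value_sketch(unsigned long val)
{
	int err = 0;

	/* Stores at GCSPR_EL0 - 8 via GCSSTTR; GCSPR_EL0 is only moved on success. */
	push_user_gcs(val, &err);
	return err;
}
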
diff --git a/arch/arm64/include/asm/gpr-num.h b/arch/arm64/include/asm/gpr-num.h
index 05da4a7c5788..a114e4f8209b 100644
--- a/arch/arm64/include/asm/gpr-num.h
+++ b/arch/arm64/include/asm/gpr-num.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
.equ .L__gpr_num_x\num, \num
@@ -11,7 +11,7 @@
.equ .L__gpr_num_xzr, 31
.equ .L__gpr_num_wzr, 31
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __DEFINE_ASM_GPR_NUMS \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
@@ -21,6 +21,6 @@
" .equ .L__gpr_num_xzr, 31\n" \
" .equ .L__gpr_num_wzr, 31\n"
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_GPR_NUM_H */
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index cbfa7b6f2e09..77d6b8c63d4e 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -41,7 +41,7 @@ do { \
\
___hcr = read_sysreg(hcr_el2); \
if (!(___hcr & HCR_TGE)) { \
- write_sysreg(___hcr | HCR_TGE, hcr_el2); \
+ write_sysreg_hcr(___hcr | HCR_TGE); \
isb(); \
} \
/* \
@@ -82,7 +82,7 @@ do { \
*/ \
barrier(); \
if (!___ctx->cnt && !(___hcr & HCR_TGE)) \
- write_sysreg(___hcr, hcr_el2); \
+ write_sysreg_hcr(___hcr); \
} while (0)
static inline void ack_bad_irq(unsigned int irq)
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index c6dff3e69539..d038ff14d16c 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_dcache_clean, &folio->flags);
+ clear_bit(PG_dcache_clean, &folio->flags.f);
#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
- clear_bit(PG_mte_tagged, &folio->flags);
- clear_bit(PG_mte_lock, &folio->flags);
+ clear_bit(PG_mte_tagged, &folio->flags.f);
+ clear_bit(PG_mte_lock, &folio->flags.f);
}
#endif
}
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
@@ -56,8 +56,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void __init arm64_hugetlb_cma_reserve(void);
-
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
@@ -69,6 +67,30 @@ extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
#include <asm-generic/hugetlb.h>
+static inline void __flush_hugetlb_tlb_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long stride,
+ tlbf_t flags)
+{
+ switch (stride) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ __flush_tlb_range(vma, start, end, PUD_SIZE, 1, flags);
+ break;
+#endif
+ case CONT_PMD_SIZE:
+ case PMD_SIZE:
+ __flush_tlb_range(vma, start, end, PMD_SIZE, 2, flags);
+ break;
+ case CONT_PTE_SIZE:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, 3, flags);
+ break;
+ default:
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, flags);
+ }
+}
+
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
unsigned long start,
@@ -76,12 +98,7 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
{
unsigned long stride = huge_page_size(hstate_vma(vma));
- if (stride == PMD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 2);
- else if (stride == PUD_SIZE)
- __flush_tlb_range(vma, start, end, stride, false, 1);
- else
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+ __flush_hugetlb_tlb_range(vma, start, end, stride, TLBF_NONE);
}
#endif /* __ASM_HUGETLB_H */
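
For reference, the stride handling in __flush_hugetlb_tlb_range() above resolves to the following __flush_tlb_range() arguments:

/*
 *  huge page stride            __flush_tlb_range() arguments
 *  PUD_SIZE                ->  stride PUD_SIZE,  level 1
 *  CONT_PMD_SIZE, PMD_SIZE ->  stride PMD_SIZE,  level 2
 *  CONT_PTE_SIZE           ->  stride PAGE_SIZE, level 3
 *  anything else           ->  stride PAGE_SIZE, TLBI_TTL_UNKNOWN
 */
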
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 2b6c61c608e2..abe8218b2325 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -46,7 +46,7 @@
#define COMPAT_HWCAP2_SB (1 << 5)
#define COMPAT_HWCAP2_SSBS (1 << 6)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/log2.h>
/*
@@ -60,108 +60,11 @@
* of KERNEL_HWCAP_{feature}.
*/
#define __khwcap_feature(x) const_ilog2(HWCAP_ ## x)
-#define KERNEL_HWCAP_FP __khwcap_feature(FP)
-#define KERNEL_HWCAP_ASIMD __khwcap_feature(ASIMD)
-#define KERNEL_HWCAP_EVTSTRM __khwcap_feature(EVTSTRM)
-#define KERNEL_HWCAP_AES __khwcap_feature(AES)
-#define KERNEL_HWCAP_PMULL __khwcap_feature(PMULL)
-#define KERNEL_HWCAP_SHA1 __khwcap_feature(SHA1)
-#define KERNEL_HWCAP_SHA2 __khwcap_feature(SHA2)
-#define KERNEL_HWCAP_CRC32 __khwcap_feature(CRC32)
-#define KERNEL_HWCAP_ATOMICS __khwcap_feature(ATOMICS)
-#define KERNEL_HWCAP_FPHP __khwcap_feature(FPHP)
-#define KERNEL_HWCAP_ASIMDHP __khwcap_feature(ASIMDHP)
-#define KERNEL_HWCAP_CPUID __khwcap_feature(CPUID)
-#define KERNEL_HWCAP_ASIMDRDM __khwcap_feature(ASIMDRDM)
-#define KERNEL_HWCAP_JSCVT __khwcap_feature(JSCVT)
-#define KERNEL_HWCAP_FCMA __khwcap_feature(FCMA)
-#define KERNEL_HWCAP_LRCPC __khwcap_feature(LRCPC)
-#define KERNEL_HWCAP_DCPOP __khwcap_feature(DCPOP)
-#define KERNEL_HWCAP_SHA3 __khwcap_feature(SHA3)
-#define KERNEL_HWCAP_SM3 __khwcap_feature(SM3)
-#define KERNEL_HWCAP_SM4 __khwcap_feature(SM4)
-#define KERNEL_HWCAP_ASIMDDP __khwcap_feature(ASIMDDP)
-#define KERNEL_HWCAP_SHA512 __khwcap_feature(SHA512)
-#define KERNEL_HWCAP_SVE __khwcap_feature(SVE)
-#define KERNEL_HWCAP_ASIMDFHM __khwcap_feature(ASIMDFHM)
-#define KERNEL_HWCAP_DIT __khwcap_feature(DIT)
-#define KERNEL_HWCAP_USCAT __khwcap_feature(USCAT)
-#define KERNEL_HWCAP_ILRCPC __khwcap_feature(ILRCPC)
-#define KERNEL_HWCAP_FLAGM __khwcap_feature(FLAGM)
-#define KERNEL_HWCAP_SSBS __khwcap_feature(SSBS)
-#define KERNEL_HWCAP_SB __khwcap_feature(SB)
-#define KERNEL_HWCAP_PACA __khwcap_feature(PACA)
-#define KERNEL_HWCAP_PACG __khwcap_feature(PACG)
-#define KERNEL_HWCAP_GCS __khwcap_feature(GCS)
-
#define __khwcap2_feature(x) (const_ilog2(HWCAP2_ ## x) + 64)
-#define KERNEL_HWCAP_DCPODP __khwcap2_feature(DCPODP)
-#define KERNEL_HWCAP_SVE2 __khwcap2_feature(SVE2)
-#define KERNEL_HWCAP_SVEAES __khwcap2_feature(SVEAES)
-#define KERNEL_HWCAP_SVEPMULL __khwcap2_feature(SVEPMULL)
-#define KERNEL_HWCAP_SVEBITPERM __khwcap2_feature(SVEBITPERM)
-#define KERNEL_HWCAP_SVESHA3 __khwcap2_feature(SVESHA3)
-#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
-#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
-#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
-#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM)
-#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM)
-#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM)
-#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16)
-#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM)
-#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16)
-#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
-#define KERNEL_HWCAP_RNG __khwcap2_feature(RNG)
-#define KERNEL_HWCAP_BTI __khwcap2_feature(BTI)
-#define KERNEL_HWCAP_MTE __khwcap2_feature(MTE)
-#define KERNEL_HWCAP_ECV __khwcap2_feature(ECV)
-#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
-#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
-#define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3)
-#define KERNEL_HWCAP_SME __khwcap2_feature(SME)
-#define KERNEL_HWCAP_SME_I16I64 __khwcap2_feature(SME_I16I64)
-#define KERNEL_HWCAP_SME_F64F64 __khwcap2_feature(SME_F64F64)
-#define KERNEL_HWCAP_SME_I8I32 __khwcap2_feature(SME_I8I32)
-#define KERNEL_HWCAP_SME_F16F32 __khwcap2_feature(SME_F16F32)
-#define KERNEL_HWCAP_SME_B16F32 __khwcap2_feature(SME_B16F32)
-#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32)
-#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
-#define KERNEL_HWCAP_WFXT __khwcap2_feature(WFXT)
-#define KERNEL_HWCAP_EBF16 __khwcap2_feature(EBF16)
-#define KERNEL_HWCAP_SVE_EBF16 __khwcap2_feature(SVE_EBF16)
-#define KERNEL_HWCAP_CSSC __khwcap2_feature(CSSC)
-#define KERNEL_HWCAP_RPRFM __khwcap2_feature(RPRFM)
-#define KERNEL_HWCAP_SVE2P1 __khwcap2_feature(SVE2P1)
-#define KERNEL_HWCAP_SME2 __khwcap2_feature(SME2)
-#define KERNEL_HWCAP_SME2P1 __khwcap2_feature(SME2P1)
-#define KERNEL_HWCAP_SME_I16I32 __khwcap2_feature(SME_I16I32)
-#define KERNEL_HWCAP_SME_BI32I32 __khwcap2_feature(SME_BI32I32)
-#define KERNEL_HWCAP_SME_B16B16 __khwcap2_feature(SME_B16B16)
-#define KERNEL_HWCAP_SME_F16F16 __khwcap2_feature(SME_F16F16)
-#define KERNEL_HWCAP_MOPS __khwcap2_feature(MOPS)
-#define KERNEL_HWCAP_HBC __khwcap2_feature(HBC)
-#define KERNEL_HWCAP_SVE_B16B16 __khwcap2_feature(SVE_B16B16)
-#define KERNEL_HWCAP_LRCPC3 __khwcap2_feature(LRCPC3)
-#define KERNEL_HWCAP_LSE128 __khwcap2_feature(LSE128)
-#define KERNEL_HWCAP_FPMR __khwcap2_feature(FPMR)
-#define KERNEL_HWCAP_LUT __khwcap2_feature(LUT)
-#define KERNEL_HWCAP_FAMINMAX __khwcap2_feature(FAMINMAX)
-#define KERNEL_HWCAP_F8CVT __khwcap2_feature(F8CVT)
-#define KERNEL_HWCAP_F8FMA __khwcap2_feature(F8FMA)
-#define KERNEL_HWCAP_F8DP4 __khwcap2_feature(F8DP4)
-#define KERNEL_HWCAP_F8DP2 __khwcap2_feature(F8DP2)
-#define KERNEL_HWCAP_F8E4M3 __khwcap2_feature(F8E4M3)
-#define KERNEL_HWCAP_F8E5M2 __khwcap2_feature(F8E5M2)
-#define KERNEL_HWCAP_SME_LUTV2 __khwcap2_feature(SME_LUTV2)
-#define KERNEL_HWCAP_SME_F8F16 __khwcap2_feature(SME_F8F16)
-#define KERNEL_HWCAP_SME_F8F32 __khwcap2_feature(SME_F8F32)
-#define KERNEL_HWCAP_SME_SF8FMA __khwcap2_feature(SME_SF8FMA)
-#define KERNEL_HWCAP_SME_SF8DP4 __khwcap2_feature(SME_SF8DP4)
-#define KERNEL_HWCAP_SME_SF8DP2 __khwcap2_feature(SME_SF8DP2)
-#define KERNEL_HWCAP_POE __khwcap2_feature(POE)
-
#define __khwcap3_feature(x) (const_ilog2(HWCAP3_ ## x) + 128)
+#include "asm/kernel-hwcap.h"
+
/*
* This yields a mask that user programs can use to figure out what
* instruction set this cpu supports.
diff --git a/arch/arm64/include/asm/hyperv-tlfs.h b/arch/arm64/include/asm/hyperv-tlfs.h
deleted file mode 100644
index bc30aadedfe9..000000000000
--- a/arch/arm64/include/asm/hyperv-tlfs.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file contains definitions from the Hyper-V Hypervisor Top-Level
- * Functional Specification (TLFS):
- * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
- *
- * Copyright (C) 2021, Microsoft, Inc.
- *
- * Author : Michael Kelley <mikelley@microsoft.com>
- */
-
-#ifndef _ASM_HYPERV_TLFS_H
-#define _ASM_HYPERV_TLFS_H
-
-#include <linux/types.h>
-
-/*
- * All data structures defined in the TLFS that are shared between Hyper-V
- * and a guest VM use Little Endian byte ordering. This matches the default
- * byte ordering of Linux running on ARM64, so no special handling is required.
- */
-
-/*
- * Group C Features. See the asm-generic version of hyperv-tlfs.h
- * for a description of Feature Groups.
- */
-
-/* Crash MSRs available */
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(8)
-
-/* STIMER direct mode is available */
-#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
-
-/*
- * To support arch-generic code calling hv_set/get_register:
- * - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
- * - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
- */
-#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
-#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
-#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
-#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
-#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
-#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
-
-#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
-#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
-#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
-
-#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
-#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
-#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
-#define HV_MSR_SIMP (HV_REGISTER_SIMP)
-#define HV_MSR_EOM (HV_REGISTER_EOM)
-
-#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
-#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
-
-union hv_msi_entry {
- u64 as_uint64[2];
- struct {
- u64 address;
- u32 data;
- u32 reserved;
- } __packed;
-};
-
-#include <asm-generic/hyperv-tlfs.h>
-
-#endif
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index 409e239834d1..a12fd897c877 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -6,6 +6,7 @@
void kvm_init_hyp_services(void);
bool kvm_arm_hyp_service_available(u32 func_id);
+void kvm_arm_target_impl_cpu_init(void);
#ifdef CONFIG_ARM_PKVM_GUEST
void pkvm_init_hyp_services(void);
diff --git a/arch/arm64/include/asm/image.h b/arch/arm64/include/asm/image.h
index c09cf942dc92..9ba85173f857 100644
--- a/arch/arm64/include/asm/image.h
+++ b/arch/arm64/include/asm/image.h
@@ -20,7 +20,7 @@
#define ARM64_IMAGE_FLAG_PAGE_SIZE_64K 3
#define ARM64_IMAGE_FLAG_PHYS_BASE 1
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#define arm64_image_flag_field(flags, field) \
(((flags) >> field##_SHIFT) & field##_MASK)
@@ -54,6 +54,6 @@ struct arm64_image_header {
__le32 res5;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_IMAGE_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index e390c432f546..f463a654a2bb 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -12,7 +12,7 @@
#include <asm/insn-def.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
enum aarch64_insn_hint_cr_op {
AARCH64_INSN_HINT_NOP = 0x0 << 5,
@@ -188,8 +188,10 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX,
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
+ AARCH64_INSN_LDST_LOAD_ACQ,
AARCH64_INSN_LDST_LOAD_EX,
AARCH64_INSN_LDST_LOAD_ACQ_EX,
+ AARCH64_INSN_LDST_STORE_REL,
AARCH64_INSN_LDST_STORE_EX,
AARCH64_INSN_LDST_STORE_REL_EX,
AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET,
@@ -351,8 +353,10 @@ __AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
-__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
-__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_acq, 0x3FDFFC00, 0x08DFFC00)
+__AARCH64_INSN_FUNCS(store_rel, 0x3FDFFC00, 0x089FFC00)
+__AARCH64_INSN_FUNCS(load_ex, 0x3FC00000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex, 0x3FC00000, 0x08000000)
__AARCH64_INSN_FUNCS(mops, 0x3B200C00, 0x19000400)
__AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000)
__AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000)
@@ -602,6 +606,10 @@ u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
int offset,
enum aarch64_insn_variant variant,
enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_load_acq_store_rel(enum aarch64_insn_register reg,
+ enum aarch64_insn_register base,
+ enum aarch64_insn_size_type size,
+ enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register base,
enum aarch64_insn_register state,
@@ -663,7 +671,6 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
enum aarch64_insn_register Rn,
enum aarch64_insn_register Rd,
u8 lsb);
-#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
@@ -675,29 +682,8 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order);
-#else
-static inline
-u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size,
- enum aarch64_insn_mem_atomic_op op,
- enum aarch64_insn_mem_order_type order)
-{
- return AARCH64_BREAK_FAULT;
-}
-
-static inline
-u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
- enum aarch64_insn_register address,
- enum aarch64_insn_register value,
- enum aarch64_insn_size_type size,
- enum aarch64_insn_mem_order_type order)
-{
- return AARCH64_BREAK_FAULT;
-}
-#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
enum aarch64_insn_system_register sysreg);
@@ -721,6 +707,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn);
typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_INSN_H */
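
aarch64_insn_gen_load_acq_store_rel() generates LDAR/STLR-class encodings for the new AARCH64_INSN_LDST_LOAD_ACQ / AARCH64_INSN_LDST_STORE_REL types; a hypothetical use (register and size choices are illustrative):

static void emit_acq_rel_sketch(void)
{
	/* Encodings for "ldar x0, [x1]" and "stlr x0, [x1]". */
	u32 ldar = aarch64_insn_gen_load_acq_store_rel(AARCH64_INSN_REG_0,
						       AARCH64_INSN_REG_1,
						       AARCH64_INSN_SIZE_64,
						       AARCH64_INSN_LDST_LOAD_ACQ);
	u32 stlr = aarch64_insn_gen_load_acq_store_rel(AARCH64_INSN_REG_0,
						       AARCH64_INSN_REG_1,
						       AARCH64_INSN_SIZE_64,
						       AARCH64_INSN_LDST_STORE_REL);

	/* ... hand ldar/stlr to the code patcher or JIT buffer ... */
}
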
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 76ebbdc6ffdd..8cbd1e96fd50 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -264,15 +264,33 @@ __iowrite64_copy(void __iomem *to, const void *from, size_t count)
typedef int (*ioremap_prot_hook_t)(phys_addr_t phys_addr, size_t size,
pgprot_t *prot);
int arm64_ioremap_prot_hook_register(const ioremap_prot_hook_t hook);
+void __iomem *__ioremap_prot(phys_addr_t phys, size_t size, pgprot_t prot);
-#define ioremap_prot ioremap_prot
+static inline void __iomem *ioremap_prot(phys_addr_t phys, size_t size,
+ pgprot_t user_prot)
+{
+ pgprot_t prot;
+ ptdesc_t user_prot_val = pgprot_val(user_prot);
-#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+ if (WARN_ON_ONCE(!(user_prot_val & PTE_USER)))
+ return NULL;
+ prot = __pgprot_modify(PAGE_KERNEL, PTE_ATTRINDX_MASK,
+ user_prot_val & PTE_ATTRINDX_MASK);
+ return __ioremap_prot(phys, size, prot);
+}
+#define ioremap_prot ioremap_prot
+
+#define ioremap(addr, size) \
+ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) \
- ioremap_prot((addr), (size), PROT_NORMAL_NC)
+ __ioremap_prot((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_np(addr, size) \
- ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
+ __ioremap_prot((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+
+
+#define ioremap_encrypted(addr, size) \
+ __ioremap_prot((addr), (size), PAGE_KERNEL)
/*
* io{read,write}{16,32,64}be() macros
@@ -293,7 +311,7 @@ static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
if (pfn_is_map_memory(__phys_to_pfn(addr)))
return (void __iomem *)__phys_to_virt(addr);
- return ioremap_prot(addr, size, PROT_NORMAL);
+ return __ioremap_prot(addr, size, __pgprot(PROT_NORMAL));
}
/*
@@ -311,7 +329,7 @@ extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
static inline bool arm64_is_protected_mmio(phys_addr_t phys_addr, size_t size)
{
if (unlikely(is_realm_world()))
- return __arm64_is_protected_mmio(phys_addr, size);
+ return arm64_rsi_is_protected(phys_addr, size);
return false;
}
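
ioremap_prot() now expects a user pgprot (it warns and returns NULL when PTE_USER is clear), and only its memory-attribute index is applied on top of PAGE_KERNEL; kernel mappings keep going through the helpers built on __ioremap_prot(). A small sketch of those helpers (illustrative only):

static void io_mapping_sketch(phys_addr_t phys, size_t size)
{
	void __iomem *regs = ioremap(phys, size);	/* Device-nGnRE */
	void __iomem *fb   = ioremap_wc(phys, size);	/* Normal non-cacheable */

	iounmap(fb);
	iounmap(regs);
}
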
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 424ed421cd97..0cb211d3607d 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -8,7 +8,7 @@
#ifndef __ASM_JUMP_LABEL_H
#define __ASM_JUMP_LABEL_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
#include <asm/insn.h>
@@ -58,5 +58,5 @@ l_yes:
return true;
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index e1b57c13f8a4..b167e9d3da91 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -2,7 +2,7 @@
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index fd5a08450b12..74a4f738c5f5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -45,11 +45,11 @@
#define SPAN_NR_ENTRIES(vstart, vend, shift) \
((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
-#define EARLY_ENTRIES(vstart, vend, shift, add) \
- (SPAN_NR_ENTRIES(vstart, vend, shift) + (add))
+#define EARLY_ENTRIES(lvl, vstart, vend) \
+ SPAN_NR_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * PTDESC_TABLE_SHIFT)
-#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
- (lvls > lvl ? EARLY_ENTRIES(vstart, vend, SWAPPER_BLOCK_SHIFT + lvl * (PAGE_SHIFT - 3), add) : 0)
+#define EARLY_LEVEL(lvl, lvls, vstart, vend, add) \
+ ((lvls) > (lvl) ? EARLY_ENTRIES(lvl, vstart, vend) + (add) : 0)
#define EARLY_PAGES(lvls, vstart, vend, add) (1 /* PGDIR page */ \
+ EARLY_LEVEL(3, (lvls), (vstart), (vend), add) /* each entry needs a next level page table */ \
@@ -58,7 +58,7 @@
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(SWAPPER_PGTABLE_LEVELS, KIMAGE_VADDR, _end, EXTRA_PAGE) \
+ EARLY_SEGMENT_EXTRA_PAGES))
-#define INIT_IDMAP_DIR_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, _end, 1))
+#define INIT_IDMAP_DIR_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, KIMAGE_VADDR, kimage_limit, 1))
#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + EARLY_IDMAP_EXTRA_PAGES) * PAGE_SIZE)
#define INIT_IDMAP_FDT_PAGES (EARLY_PAGES(INIT_IDMAP_PGTABLE_LEVELS, 0UL, UL(MAX_FDT_SIZE), 1) - 1)
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 4d9cc7a76d9c..892e5bebda95 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -25,7 +25,7 @@
#define KEXEC_ARCH KEXEC_ARCH_AARCH64
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/**
* crash_setup_regs() - save registers for the panic kernel
@@ -130,6 +130,6 @@ extern int load_other_segments(struct kimage *image,
char *cmdline);
#endif
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index a81937fae9f6..21dbc9dda747 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -10,8 +10,6 @@
#include <asm/set_memory.h>
-static inline bool arch_kfence_init_pool(void) { return true; }
-
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
set_memory_valid(addr, 1, !protect);
@@ -25,6 +23,7 @@ static inline bool arm64_kfence_can_set_direct_map(void)
{
return !kfence_early_init;
}
+bool arch_kfence_init_pool(void);
#else /* CONFIG_KFENCE */
static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
#endif /* CONFIG_KFENCE */
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
index 21fc85e9d2be..67ef1c5532ae 100644
--- a/arch/arm64/include/asm/kgdb.h
+++ b/arch/arm64/include/asm/kgdb.h
@@ -14,7 +14,7 @@
#include <linux/ptrace.h>
#include <asm/debug-monitors.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void arch_kgdb_breakpoint(void)
{
@@ -24,7 +24,19 @@ static inline void arch_kgdb_breakpoint(void)
extern void kgdb_handle_bus_error(void);
extern int kgdb_fault_expected;
-#endif /* !__ASSEMBLY__ */
+int kgdb_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kgdb_compiled_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_KGDB
+int kgdb_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int kgdb_single_step_handler(struct pt_regs *regs,
+ unsigned long esr)
+{
+ return DBG_HOOK_ERROR;
+}
+#endif
+
+#endif /* !__ASSEMBLER__ */
/*
* gdb remote protocol (well most versions of it) expects the following
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
index be7a3680dadf..f2782560647b 100644
--- a/arch/arm64/include/asm/kprobes.h
+++ b/arch/arm64/include/asm/kprobes.h
@@ -41,4 +41,12 @@ void __kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
+
+int __kprobes kprobe_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+int __kprobes kprobe_ss_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+int __kprobes kretprobe_brk_handler(struct pt_regs *regs,
+ unsigned long esr);
+
#endif /* _ARM_KPROBES_H */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3e0f0de1d2da..3f9233b5a130 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,67 +12,70 @@
#include <asm/sysreg.h>
#include <asm/types.h>
-/* Hyp Configuration Register (HCR) bits */
-
-#define HCR_TID5 (UL(1) << 58)
-#define HCR_DCT (UL(1) << 57)
-#define HCR_ATA_SHIFT 56
-#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
-#define HCR_TTLBOS (UL(1) << 55)
-#define HCR_TTLBIS (UL(1) << 54)
-#define HCR_ENSCXT (UL(1) << 53)
-#define HCR_TOCU (UL(1) << 52)
-#define HCR_AMVOFFEN (UL(1) << 51)
-#define HCR_TICAB (UL(1) << 50)
-#define HCR_TID4 (UL(1) << 49)
-#define HCR_FIEN (UL(1) << 47)
-#define HCR_FWB (UL(1) << 46)
-#define HCR_NV2 (UL(1) << 45)
-#define HCR_AT (UL(1) << 44)
-#define HCR_NV1 (UL(1) << 43)
-#define HCR_NV (UL(1) << 42)
-#define HCR_API (UL(1) << 41)
-#define HCR_APK (UL(1) << 40)
-#define HCR_TEA (UL(1) << 37)
-#define HCR_TERR (UL(1) << 36)
-#define HCR_TLOR (UL(1) << 35)
-#define HCR_E2H (UL(1) << 34)
-#define HCR_ID (UL(1) << 33)
-#define HCR_CD (UL(1) << 32)
-#define HCR_RW_SHIFT 31
-#define HCR_RW (UL(1) << HCR_RW_SHIFT)
-#define HCR_TRVM (UL(1) << 30)
-#define HCR_HCD (UL(1) << 29)
-#define HCR_TDZ (UL(1) << 28)
-#define HCR_TGE (UL(1) << 27)
-#define HCR_TVM (UL(1) << 26)
-#define HCR_TTLB (UL(1) << 25)
-#define HCR_TPU (UL(1) << 24)
-#define HCR_TPC (UL(1) << 23) /* HCR_TPCP if FEAT_DPB */
-#define HCR_TSW (UL(1) << 22)
-#define HCR_TACR (UL(1) << 21)
-#define HCR_TIDCP (UL(1) << 20)
-#define HCR_TSC (UL(1) << 19)
-#define HCR_TID3 (UL(1) << 18)
-#define HCR_TID2 (UL(1) << 17)
-#define HCR_TID1 (UL(1) << 16)
-#define HCR_TID0 (UL(1) << 15)
-#define HCR_TWE (UL(1) << 14)
-#define HCR_TWI (UL(1) << 13)
-#define HCR_DC (UL(1) << 12)
-#define HCR_BSU (3 << 10)
-#define HCR_BSU_IS (UL(1) << 10)
-#define HCR_FB (UL(1) << 9)
-#define HCR_VSE (UL(1) << 8)
-#define HCR_VI (UL(1) << 7)
-#define HCR_VF (UL(1) << 6)
-#define HCR_AMO (UL(1) << 5)
-#define HCR_IMO (UL(1) << 4)
-#define HCR_FMO (UL(1) << 3)
-#define HCR_PTW (UL(1) << 2)
-#define HCR_SWIO (UL(1) << 1)
-#define HCR_VM (UL(1) << 0)
-#define HCR_RES0 ((UL(1) << 48) | (UL(1) << 39))
+/*
+ * Because I'm terribly lazy and repainting the whole of the KVM
+ * code with the proper names is a pain, use a helper to map the names
+ * inherited from AArch32 onto the new fancy nomenclature. One day...
+ */
+#define __HCR(x) HCR_EL2_##x
+
+#define HCR_TID5 __HCR(TID5)
+#define HCR_DCT __HCR(DCT)
+#define HCR_ATA_SHIFT __HCR(ATA_SHIFT)
+#define HCR_ATA __HCR(ATA)
+#define HCR_TTLBOS __HCR(TTLBOS)
+#define HCR_TTLBIS __HCR(TTLBIS)
+#define HCR_ENSCXT __HCR(EnSCXT)
+#define HCR_TOCU __HCR(TOCU)
+#define HCR_AMVOFFEN __HCR(AMVOFFEN)
+#define HCR_TICAB __HCR(TICAB)
+#define HCR_TID4 __HCR(TID4)
+#define HCR_FIEN __HCR(FIEN)
+#define HCR_FWB __HCR(FWB)
+#define HCR_NV2 __HCR(NV2)
+#define HCR_AT __HCR(AT)
+#define HCR_NV1 __HCR(NV1)
+#define HCR_NV __HCR(NV)
+#define HCR_API __HCR(API)
+#define HCR_APK __HCR(APK)
+#define HCR_TEA __HCR(TEA)
+#define HCR_TERR __HCR(TERR)
+#define HCR_TLOR __HCR(TLOR)
+#define HCR_E2H __HCR(E2H)
+#define HCR_ID __HCR(ID)
+#define HCR_CD __HCR(CD)
+#define HCR_RW __HCR(RW)
+#define HCR_TRVM __HCR(TRVM)
+#define HCR_HCD __HCR(HCD)
+#define HCR_TDZ __HCR(TDZ)
+#define HCR_TGE __HCR(TGE)
+#define HCR_TVM __HCR(TVM)
+#define HCR_TTLB __HCR(TTLB)
+#define HCR_TPU __HCR(TPU)
+#define HCR_TPC __HCR(TPCP)
+#define HCR_TSW __HCR(TSW)
+#define HCR_TACR __HCR(TACR)
+#define HCR_TIDCP __HCR(TIDCP)
+#define HCR_TSC __HCR(TSC)
+#define HCR_TID3 __HCR(TID3)
+#define HCR_TID2 __HCR(TID2)
+#define HCR_TID1 __HCR(TID1)
+#define HCR_TID0 __HCR(TID0)
+#define HCR_TWE __HCR(TWE)
+#define HCR_TWI __HCR(TWI)
+#define HCR_DC __HCR(DC)
+#define HCR_BSU __HCR(BSU)
+#define HCR_BSU_IS __HCR(BSU_IS)
+#define HCR_FB __HCR(FB)
+#define HCR_VSE __HCR(VSE)
+#define HCR_VI __HCR(VI)
+#define HCR_VF __HCR(VF)
+#define HCR_AMO __HCR(AMO)
+#define HCR_IMO __HCR(IMO)
+#define HCR_FMO __HCR(FMO)
+#define HCR_PTW __HCR(PTW)
+#define HCR_SWIO __HCR(SWIO)
+#define HCR_VM __HCR(VM)
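The __HCR() helper is plain token pasting: __HCR(TWI) expands to HCR_EL2_TWI, so the legacy HCR_* names become thin aliases for the generated HCR_EL2_* definitions. A standalone sketch of the same pattern (the bit value below is only illustrative; the real constants come from the generated sysreg headers):

#include <stdio.h>

#define HCR_EL2_TWI	(1UL << 13)	/* stand-in value for illustration */

#define __HCR(x)	HCR_EL2_##x	/* paste the short name onto the EL2 prefix */
#define HCR_TWI		__HCR(TWI)	/* HCR_TWI is now an alias of HCR_EL2_TWI */

int main(void)
{
	printf("HCR_TWI == HCR_EL2_TWI: %d\n", HCR_TWI == HCR_EL2_TWI);
	return 0;
}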
/*
* The bits we set in HCR:
@@ -92,23 +95,23 @@
* SWIO: Turn set/way invalidates into set/way clean+invalidate
* PTW: Take a stage2 fault if a stage1 walk steps in device memory
* TID3: Trap EL1 reads of group 3 ID registers
- * TID2: Trap CTR_EL0, CCSIDR2_EL1, CLIDR_EL1, and CSSELR_EL1
+ * TID1: Trap REVIDR_EL1, AIDR_EL1, and SMIDR_EL1
*/
#define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
- HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
-#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
+ HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3 | HCR_TID1)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
-#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H | HCR_AMO | HCR_IMO | HCR_FMO)
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
#define MPAMHCR_HOST_FLAGS 0
/* TCR_EL2 Registers bits */
#define TCR_EL2_DS (1UL << 32)
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_HPD (1 << 24)
+#define TCR_EL2_HA (1 << 21)
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@@ -119,39 +122,9 @@
#define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
#define TCR_EL2_T0SZ_MASK 0x3f
#define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \
- TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
-
-/* VTCR_EL2 Registers bits */
-#define VTCR_EL2_DS TCR_EL2_DS
-#define VTCR_EL2_RES1 (1U << 31)
-#define VTCR_EL2_HD (1 << 22)
-#define VTCR_EL2_HA (1 << 21)
-#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
-#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
-#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
-#define VTCR_EL2_TG0_4K TCR_TG0_4K
-#define VTCR_EL2_TG0_16K TCR_TG0_16K
-#define VTCR_EL2_TG0_64K TCR_TG0_64K
-#define VTCR_EL2_SH0_MASK TCR_SH0_MASK
-#define VTCR_EL2_SH0_INNER TCR_SH0_INNER
-#define VTCR_EL2_ORGN0_MASK TCR_ORGN0_MASK
-#define VTCR_EL2_ORGN0_WBWA TCR_ORGN0_WBWA
-#define VTCR_EL2_IRGN0_MASK TCR_IRGN0_MASK
-#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
-#define VTCR_EL2_SL0_SHIFT 6
-#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
-#define VTCR_EL2_T0SZ_MASK 0x3f
-#define VTCR_EL2_VS_SHIFT 19
-#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
-#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
-
-#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x)
+ TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK)
/*
- * We configure the Stage-2 page tables to always restrict the IPA space to be
- * 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
- * not known to exist and will break with this configuration.
- *
* The VTCR_EL2 is configured per VM and is initialised in kvm_init_stage2_mmu.
*
* Note that when using 4K pages, we concatenate two first level page tables
@@ -159,9 +132,6 @@
*
*/
-#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
- VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
-
/*
* VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
* Interestingly, it depends on the page size.
@@ -193,30 +163,35 @@
*/
#ifdef CONFIG_ARM64_64K_PAGES
-#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K
+#define VTCR_EL2_TGRAN 64K
#define VTCR_EL2_TGRAN_SL0_BASE 3UL
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K
+#define VTCR_EL2_TGRAN 16K
#define VTCR_EL2_TGRAN_SL0_BASE 3UL
#else /* 4K */
-#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K
+#define VTCR_EL2_TGRAN 4K
#define VTCR_EL2_TGRAN_SL0_BASE 2UL
#endif
#define VTCR_EL2_LVLS_TO_SL0(levels) \
- ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
+ FIELD_PREP(VTCR_EL2_SL0, (VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))))
#define VTCR_EL2_SL0_TO_LVLS(sl0) \
((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
#define VTCR_EL2_LVLS(vtcr) \
- VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)
+ VTCR_EL2_SL0_TO_LVLS(FIELD_GET(VTCR_EL2_SL0, (vtcr)))
+
+#define VTCR_EL2_FLAGS (SYS_FIELD_PREP_ENUM(VTCR_EL2, SH0, INNER) | \
+ SYS_FIELD_PREP_ENUM(VTCR_EL2, ORGN0, WBWA) | \
+ SYS_FIELD_PREP_ENUM(VTCR_EL2, IRGN0, WBWA) | \
+ SYS_FIELD_PREP_ENUM(VTCR_EL2, TG0, VTCR_EL2_TGRAN) | \
+ VTCR_EL2_RES1)
-#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
-#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))
+#define VTCR_EL2_IPA(vtcr) (64 - FIELD_GET(VTCR_EL2_T0SZ, (vtcr)))
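The open-coded shift/mask pairs give way to FIELD_PREP()/FIELD_GET() against the generated VTCR_EL2_* field masks. A self-contained sketch of what those helpers do for the SL0 field, using local stand-ins rather than <linux/bitfield.h> (the SL0 position, bits [7:6], matches the definitions removed above):

#include <stdio.h>
#include <stdint.h>

#define VTCR_EL2_SL0	(0x3ULL << 6)	/* SL0 field, bits [7:6] */

/* Minimal reimplementations of the kernel's FIELD_PREP()/FIELD_GET(). */
#define FIELD_PREP(mask, val)	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
	uint64_t vtcr = FIELD_PREP(VTCR_EL2_SL0, 2);	/* e.g. SL0 for 4 levels at 4K */

	printf("vtcr = %#llx, SL0 = %llu\n",
	       (unsigned long long)vtcr,
	       (unsigned long long)FIELD_GET(VTCR_EL2_SL0, vtcr));
	return 0;
}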
/*
* ARM VMSAv8-64 defines an algorithm for finding the translation table
@@ -300,7 +275,7 @@
#define CPTR_EL2_TSM (1 << 12)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
-#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
+#define CPTR_NVHE_EL2_RES1 (BIT(13) | BIT(9) | GENMASK(7, 0))
#define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \
GENMASK(29, 21) | \
GENMASK(19, 14) | \
@@ -313,56 +288,19 @@
GENMASK(15, 0))
/*
- * FGT register definitions
- *
- * RES0 and polarity masks as of DDI0487J.a, to be updated as needed.
- * We're not using the generated masks as they are usually ahead of
- * the published ARM ARM, which we use as a reference.
- *
- * Once we get to a point where the two describe the same thing, we'll
- * merge the definitions. One day.
- */
-#define __HFGRTR_EL2_RES0 HFGxTR_EL2_RES0
-#define __HFGRTR_EL2_MASK GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK ~(__HFGRTR_EL2_RES0 | __HFGRTR_EL2_MASK)
-
-/*
- * The HFGWTR bits are a subset of HFGRTR bits. To ensure we don't miss any
- * future additions, define __HFGWTR* macros relative to __HFGRTR* ones.
+ * Polarity masks for HCRX_EL2, limited to the bits that we know about
+ * at this point in time. It doesn't mean that we actually *handle*
+ * them, but that at least those that are not advertised to a guest
+ * will be RES0 for that guest.
*/
-#define __HFGRTR_ONLY_MASK (BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
- GENMASK(26, 25) | BIT(21) | BIT(18) | \
- GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
-#define __HFGWTR_EL2_RES0 (__HFGRTR_EL2_RES0 | __HFGRTR_ONLY_MASK)
-#define __HFGWTR_EL2_MASK (__HFGRTR_EL2_MASK & ~__HFGRTR_ONLY_MASK)
-#define __HFGWTR_EL2_nMASK ~(__HFGWTR_EL2_RES0 | __HFGWTR_EL2_MASK)
-
-#define __HFGITR_EL2_RES0 HFGITR_EL2_RES0
-#define __HFGITR_EL2_MASK (BIT(62) | BIT(60) | GENMASK(54, 0))
-#define __HFGITR_EL2_nMASK ~(__HFGITR_EL2_RES0 | __HFGITR_EL2_MASK)
-
-#define __HDFGRTR_EL2_RES0 HDFGRTR_EL2_RES0
-#define __HDFGRTR_EL2_MASK (BIT(63) | GENMASK(58, 50) | GENMASK(48, 43) | \
- GENMASK(41, 40) | GENMASK(37, 22) | \
- GENMASK(19, 9) | GENMASK(7, 0))
-#define __HDFGRTR_EL2_nMASK ~(__HDFGRTR_EL2_RES0 | __HDFGRTR_EL2_MASK)
-
-#define __HDFGWTR_EL2_RES0 HDFGWTR_EL2_RES0
-#define __HDFGWTR_EL2_MASK (GENMASK(57, 52) | GENMASK(50, 48) | \
- GENMASK(46, 44) | GENMASK(42, 41) | \
- GENMASK(37, 35) | GENMASK(33, 31) | \
- GENMASK(29, 23) | GENMASK(21, 10) | \
- GENMASK(8, 7) | GENMASK(5, 0))
-#define __HDFGWTR_EL2_nMASK ~(__HDFGWTR_EL2_RES0 | __HDFGWTR_EL2_MASK)
-
-#define __HAFGRTR_EL2_RES0 HAFGRTR_EL2_RES0
-#define __HAFGRTR_EL2_MASK (GENMASK(49, 17) | GENMASK(4, 0))
-#define __HAFGRTR_EL2_nMASK ~(__HAFGRTR_EL2_RES0 | __HAFGRTR_EL2_MASK)
-
-/* Similar definitions for HCRX_EL2 */
-#define __HCRX_EL2_RES0 HCRX_EL2_RES0
-#define __HCRX_EL2_MASK (BIT(6))
-#define __HCRX_EL2_nMASK ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK)
+#define __HCRX_EL2_MASK (BIT_ULL(6))
+#define __HCRX_EL2_nMASK (GENMASK_ULL(24, 14) | \
+ GENMASK_ULL(11, 7) | \
+ GENMASK_ULL(5, 0))
+#define __HCRX_EL2_RES0 ~(__HCRX_EL2_nMASK | __HCRX_EL2_MASK)
+#define __HCRX_EL2_RES1 ~(__HCRX_EL2_nMASK | \
+ __HCRX_EL2_MASK | \
+ __HCRX_EL2_RES0)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
@@ -378,6 +316,8 @@
#define PAR_TO_HPFAR(par) \
(((par) & GENMASK_ULL(52 - 1, 12)) >> 8)
+#define FAR_TO_FIPA_OFFSET(far) ((far) & GENMASK_ULL(11, 0))
+
#define ECN(x) { ESR_ELx_EC_##x, #x }
#define kvm_arm_exception_class \
@@ -391,8 +331,6 @@
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)
-#define CPACR_EL1_TTA (1 << 28)
-
#define kvm_mode_names \
{ PSR_MODE_EL0t, "EL0t" }, \
{ PSR_MODE_EL1t, "EL1t" }, \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index ca2590344313..37414440cee7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -46,25 +46,23 @@
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/mm.h>
enum __kvm_host_smccc_func {
- /* Hypercalls available only prior to pKVM finalisation */
+ /* Hypercalls that are unavailable once pKVM has finalised. */
/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
- __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
- __KVM_HOST_SMCCC_FUNC___pkvm_init,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
+ __KVM_HOST_SMCCC_FUNC_MIN_PKVM = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
- /* Hypercalls available after pKVM finalisation */
- __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
- __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
+ /* Hypercalls that are always available and common to [nh]VHE/pKVM. */
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
@@ -74,11 +72,42 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
- __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr,
+ __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
+ __KVM_HOST_SMCCC_FUNC_MAX_NO_PKVM = __KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,
+
+ /* Hypercalls that are available only when pKVM has finalised. */
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
+ __KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
- __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_in_poison_fault,
+ __KVM_HOST_SMCCC_FUNC___pkvm_force_reclaim_guest_page,
+ __KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_page,
+ __KVM_HOST_SMCCC_FUNC___pkvm_start_teardown_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_finalize_teardown_vm,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
+ __KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___tracing_load,
+ __KVM_HOST_SMCCC_FUNC___tracing_unload,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable,
+ __KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
+ __KVM_HOST_SMCCC_FUNC___tracing_update_clock,
+ __KVM_HOST_SMCCC_FUNC___tracing_reset,
+ __KVM_HOST_SMCCC_FUNC___tracing_enable_event,
+ __KVM_HOST_SMCCC_FUNC___tracing_write_event,
};
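The reordered enum groups hypercalls by availability and introduces __KVM_HOST_SMCCC_FUNC_MIN_PKVM and ..._MAX_NO_PKVM as range sentinels. A hedged sketch of how such sentinels can gate a handler lookup; the names and values below are toys, not the patch's actual dispatch code:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the range sentinels (values are made up). */
enum toy_func {
	TOY_PROT_FINALIZE = 6,
	TOY_MIN_PKVM = TOY_PROT_FINALIZE,	/* last call retired at finalisation */
	TOY_VCPU_RUN,				/* always available */
	TOY_MAX_NO_PKVM = TOY_VCPU_RUN,
	TOY_HOST_SHARE_HYP,			/* first pKVM-only call */
};

/* Hypothetical gate: may @id still be invoked once pKVM has finalised? */
static bool callable_after_finalisation(enum toy_func id)
{
	return id > TOY_MIN_PKVM;
}

int main(void)
{
	printf("prot_finalize: %d, host_share_hyp: %d\n",
	       callable_after_finalisation(TOY_PROT_FINALIZE),
	       callable_after_finalisation(TOY_HOST_SHARE_HYP));
	return 0;
}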
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
@@ -236,9 +265,9 @@ extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
-extern void __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
-extern void __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
-extern void __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
+extern int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -247,8 +276,6 @@ extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);
-extern u64 __kvm_get_mdcr_el2(void);
-
#define __KVM_EXTABLE(from, to) \
" .pushsection __kvm_ex_table, \"a\"\n" \
" .align 3\n" \
@@ -283,7 +310,8 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
-asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
+asmlinkage void __noreturn __kvm_host_psci_cpu_on_entry(void);
+asmlinkage void __noreturn __kvm_host_psci_cpu_resume_entry(void);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
@@ -295,7 +323,7 @@ void kvm_compute_final_ctr_el0(struct alt_instr *alt,
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
.macro get_host_ctxt reg, tmp
adr_this_cpu \reg, kvm_host_data, \tmp
diff --git a/arch/arm64/include/asm/kvm_define_hypevents.h b/arch/arm64/include/asm/kvm_define_hypevents.h
new file mode 100644
index 000000000000..77d6790252a6
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_define_hypevents.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define REMOTE_EVENT_INCLUDE_FILE arch/arm64/include/asm/kvm_hypevents.h
+
+#define REMOTE_EVENT_SECTION "_hyp_events"
+
+#define HE_STRUCT(__args) __args
+#define HE_PRINTK(__args...) __args
+#define he_field re_field
+
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ REMOTE_EVENT(__name, 0, RE_STRUCT(__struct), RE_PRINTK(__printk))
+
+#define HYP_EVENT_MULTI_READ
+#include <trace/define_remote_events.h>
+#undef HYP_EVENT_MULTI_READ
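kvm_define_hypevents.h redefines HYP_EVENT() and re-reads the event header under HYP_EVENT_MULTI_READ, the classic multi-include (X-macro) technique: the same list is expanded several times, each time under a different macro definition. A self-contained sketch of the pattern using a local list macro instead of a separate header (names are invented; this is not the kernel's remote-event machinery):

#include <stdio.h>

/* The event list; each expansion of HYP_EVENT() depends on the definition
 * in force at the point the list is read. */
#define EVENT_LIST		\
	HYP_EVENT(vcpu_enter)	\
	HYP_EVENT(vcpu_exit)

/* First read: generate an enum of event ids. */
#define HYP_EVENT(name)	EVT_##name,
enum event_id { EVENT_LIST EVT_MAX };
#undef HYP_EVENT

/* Second read: generate a matching table of printable names. */
#define HYP_EVENT(name)	[EVT_##name] = #name,
static const char *event_names[EVT_MAX] = { EVENT_LIST };
#undef HYP_EVENT

int main(void)
{
	for (int i = 0; i < EVT_MAX; i++)
		printf("%d: %s\n", i, event_names[i]);
	return 0;
}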
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index cf811009a33c..5bf3d7e1d92c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -45,16 +45,41 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_sync(struct kvm_vcpu *vcpu, u64 esr);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+ return kvm_inject_sea(vcpu, true, addr);
+}
+
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+ /*
+ * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+ * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+ *
+ * Set the bit when injecting an SError w/o an ESR to indicate ISS
+ * does not follow the architected format.
+ */
+ return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
@@ -96,22 +121,6 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr_el2;
}
-static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr_el2 &= ~HCR_TWE;
- if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
- vcpu->kvm->arch.vgic.nassgireq)
- vcpu->arch.hcr_el2 &= ~HCR_TWI;
- else
- vcpu->arch.hcr_el2 |= HCR_TWI;
-}
-
-static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.hcr_el2 |= HCR_TWE;
- vcpu->arch.hcr_el2 |= HCR_TWI;
-}
-
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.vsesr_el2;
@@ -184,29 +193,49 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}
-static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
- (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
+ (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H));
}
-static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
+static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
- return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
+ return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
}
-static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
+static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
{
- return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
-}
+ /*
+ * DDI0487L.b Known Issue D22105
+ *
+ * When executing at EL2 and HCR_EL2.{E2H,TGE} = {1, 0} it is
+ * IMPLEMENTATION DEFINED whether the effective value of HCR_EL2.AMO
+ * is the value programmed or 1.
+ *
+ * Make the implementation choice of treating the effective value as 1 as
+ * we cannot subsequently catch changes to TGE or AMO that would
+ * otherwise lead to the SError becoming deliverable.
+ */
+ if (vcpu_is_el2(vcpu) && vcpu_el2_e2h_is_set(vcpu) && !vcpu_el2_tge_is_set(vcpu))
+ return true;
-static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
-{
- return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
+ return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
}
-static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
+static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
+ bool e2h, tge;
+ u64 hcr;
+
+ if (!vcpu_has_nv(vcpu))
+ return false;
+
+ hcr = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ e2h = (hcr & HCR_E2H);
+ tge = (hcr & HCR_TGE);
+
/*
* We are in a hypervisor context if the vcpu mode is EL2 or
* E2H and TGE bits are set. The latter means we are in the user space
@@ -215,19 +244,26 @@ static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
* Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
* rest of the KVM code, and will result in a misbehaving guest.
*/
- return vcpu_is_el2_ctxt(ctxt) ||
- (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
- __vcpu_el2_tge_is_set(ctxt);
+ return vcpu_is_el2(vcpu) || (e2h && tge) || tge;
}
-static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
+static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
{
- return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
+ return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}
-static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
{
- return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
+ return vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
+}
+
+static inline bool vserror_state_is_nested(struct kvm_vcpu *vcpu)
+{
+ if (!is_nested_ctxt(vcpu))
+ return false;
+
+ return vcpu_el2_amo_is_set(vcpu) ||
+ (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}
/*
@@ -281,6 +317,19 @@ static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
return vcpu->arch.fault.esr_el2;
}
+static inline bool guest_hyp_wfx_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+ u64 esr = kvm_vcpu_get_esr(vcpu);
+ bool is_wfe = !!(esr & ESR_ELx_WFx_ISS_WFE);
+ u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
+
+ if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
+ return false;
+
+ return ((is_wfe && (hcr_el2 & HCR_TWE)) ||
+ (!is_wfe && (hcr_el2 & HCR_TWI)));
+}
+
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u64 esr = kvm_vcpu_get_esr(vcpu);
@@ -298,7 +347,12 @@ static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vc
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
- return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
+ u64 hpfar = vcpu->arch.fault.hpfar_el2;
+
+ if (unlikely(!(hpfar & HPFAR_EL2_NS)))
+ return INVALID_GPA;
+
+ return FIELD_GET(HPFAR_EL2_FIPA, hpfar) << 12;
}
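kvm_vcpu_get_fault_ipa() now pulls the FIPA field out of HPFAR_EL2 with FIELD_GET() and treats a clear NS bit as an invalid GPA; the new FAR_TO_FIPA_OFFSET() suggests the low 12 bits are recovered from FAR_EL2. A standalone sketch of that address reconstruction, with locally defined masks rather than the kernel's HPFAR_EL2_* definitions:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for illustration only. */
#define HPFAR_FIPA_MASK		(~0xfULL)		/* FIPA field, bits [..:4] */
#define FAR_TO_FIPA_OFFSET(far)	((far) & 0xfffULL)	/* page offset of the fault */

static uint64_t fault_ipa(uint64_t hpfar, uint64_t far)
{
	/* Page frame: the FIPA field value shifted up to bit 12 ... */
	uint64_t ipa = ((hpfar & HPFAR_FIPA_MASK) >> 4) << 12;

	/* ... plus the within-page offset taken from FAR. */
	return ipa | FAR_TO_FIPA_OFFSET(far);
}

int main(void)
{
	/* Assumed example fault at IPA 0x80123456. */
	uint64_t hpfar = (0x80123456ULL >> 12) << 4;
	uint64_t far   = 0xffff000080123456ULL;

	printf("reconstructed IPA = %#llx\n", (unsigned long long)fault_ipa(hpfar, far));
	return 0;
}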
static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
@@ -457,21 +511,29 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
if (vcpu_mode_is_32bit(vcpu)) {
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
- u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+ enum vcpu_sysreg r;
+ u64 sctlr;
+
+ r = vcpu_has_nv(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+
+ sctlr = vcpu_read_sys_reg(vcpu, r);
sctlr |= SCTLR_ELx_EE;
- vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
+ vcpu_write_sys_reg(vcpu, sctlr, r);
}
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
+ enum vcpu_sysreg r;
+ u64 bit;
+
if (vcpu_mode_is_32bit(vcpu))
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
- if (vcpu_mode_priv(vcpu))
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
- else
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
+ r = is_hyp_ctxt(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+ bit = vcpu_mode_priv(vcpu) ? SCTLR_ELx_EE : SCTLR_EL1_E0E;
+
+ return vcpu_read_sys_reg(vcpu, r) & bit;
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
@@ -549,110 +611,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu_set_flag((v), e); \
} while (0)
-#define __build_check_all_or_none(r, bits) \
- BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))
-
-#define __cpacr_to_cptr_clr(clr, set) \
- ({ \
- u64 cptr = 0; \
- \
- if ((set) & CPACR_ELx_FPEN) \
- cptr |= CPTR_EL2_TFP; \
- if ((set) & CPACR_ELx_ZEN) \
- cptr |= CPTR_EL2_TZ; \
- if ((set) & CPACR_ELx_SMEN) \
- cptr |= CPTR_EL2_TSM; \
- if ((clr) & CPACR_ELx_TTA) \
- cptr |= CPTR_EL2_TTA; \
- if ((clr) & CPTR_EL2_TAM) \
- cptr |= CPTR_EL2_TAM; \
- if ((clr) & CPTR_EL2_TCPAC) \
- cptr |= CPTR_EL2_TCPAC; \
- \
- cptr; \
- })
-
-#define __cpacr_to_cptr_set(clr, set) \
- ({ \
- u64 cptr = 0; \
- \
- if ((clr) & CPACR_ELx_FPEN) \
- cptr |= CPTR_EL2_TFP; \
- if ((clr) & CPACR_ELx_ZEN) \
- cptr |= CPTR_EL2_TZ; \
- if ((clr) & CPACR_ELx_SMEN) \
- cptr |= CPTR_EL2_TSM; \
- if ((set) & CPACR_ELx_TTA) \
- cptr |= CPTR_EL2_TTA; \
- if ((set) & CPTR_EL2_TAM) \
- cptr |= CPTR_EL2_TAM; \
- if ((set) & CPTR_EL2_TCPAC) \
- cptr |= CPTR_EL2_TCPAC; \
- \
- cptr; \
- })
-
-#define cpacr_clear_set(clr, set) \
- do { \
- BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \
- BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \
- __build_check_all_or_none((clr), CPACR_ELx_FPEN); \
- __build_check_all_or_none((set), CPACR_ELx_FPEN); \
- __build_check_all_or_none((clr), CPACR_ELx_ZEN); \
- __build_check_all_or_none((set), CPACR_ELx_ZEN); \
- __build_check_all_or_none((clr), CPACR_ELx_SMEN); \
- __build_check_all_or_none((set), CPACR_ELx_SMEN); \
- \
- if (has_vhe() || has_hvhe()) \
- sysreg_clear_set(cpacr_el1, clr, set); \
- else \
- sysreg_clear_set(cptr_el2, \
- __cpacr_to_cptr_clr(clr, set), \
- __cpacr_to_cptr_set(clr, set));\
- } while (0)
-
-static __always_inline void kvm_write_cptr_el2(u64 val)
-{
- if (has_vhe() || has_hvhe())
- write_sysreg(val, cpacr_el1);
- else
- write_sysreg(val, cptr_el2);
-}
-
-static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
-{
- u64 val;
-
- if (has_vhe()) {
- val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
- if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_EL1_SMEN_EL1EN;
- } else if (has_hvhe()) {
- val = CPACR_ELx_FPEN;
-
- if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
- val |= CPACR_ELx_ZEN;
- if (cpus_have_final_cap(ARM64_SME))
- val |= CPACR_ELx_SMEN;
- } else {
- val = CPTR_NVHE_EL2_RES1;
-
- if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
- val |= CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME))
- val &= ~CPTR_EL2_TSM;
- }
-
- return val;
-}
-
-static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
-{
- u64 val = kvm_get_reset_cptr_el2(vcpu);
-
- kvm_write_cptr_el2(val);
-}
-
/*
* Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
* format if E2H isn't set.
@@ -685,7 +643,7 @@ static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \
(!vcpu_has_nv(vcpu) ? false : \
____cptr_xen_trap_enabled(vcpu, \
- SYS_FIELD_GET(CPACR_ELx, xen, \
+ SYS_FIELD_GET(CPACR_EL1, xen, \
vcpu_sanitised_cptr_el2(vcpu))))
static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
@@ -698,8 +656,36 @@ static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}
-static inline void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
+static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
{
- vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
+ struct kvm *kvm = vcpu->kvm;
+
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ /*
+ * In general, all HCRX_EL2 bits are gated by a feature.
+ * The only reason we can set SMPME without checking any
+ * feature is that its effects are not directly observable
+ * from the guest.
+ */
+ vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+ vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
+
+ if (kvm_has_tcr2(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+ if (kvm_has_fpmr(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+
+ if (kvm_has_sctlr2(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnALS;
+
+ if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_EnASR;
+ }
}
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e18e9244d17a..851f6171751c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -39,19 +39,22 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 7
+#define KVM_VCPU_MAX_FEATURES 9
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
-#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
-#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
-#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
-#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
+#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
+#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
+#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
+#define KVM_REQ_VGIC_PROCESS_UPDATE KVM_ARCH_REQ(11)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -85,6 +88,10 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
struct kvm_hyp_memcache {
phys_addr_t head;
unsigned long nr_pages;
+ struct pkvm_mapping *mapping; /* only used from EL1 */
+
+#define HYP_MEMCACHE_ACCOUNT_STAGE2 BIT(1)
+ unsigned long flags;
};
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
@@ -99,7 +106,7 @@ static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
void *(*to_va)(phys_addr_t phys))
{
- phys_addr_t *p = to_va(mc->head);
+ phys_addr_t *p = to_va(mc->head & PAGE_MASK);
if (!mc->nr_pages)
return NULL;
@@ -194,7 +201,7 @@ struct kvm_s2_mmu {
* host to parse the guest S2.
* This either contains:
* - the virtual VTTBR programmed by the guest hypervisor with
- * CnP cleared
+ * CnP cleared
* - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
*
* We also cache the full VTCR which gets used for TLB invalidation,
@@ -210,6 +217,10 @@ struct kvm_s2_mmu {
*/
bool nested_stage2_enabled;
+#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
+ struct dentry *shadow_pt_debugfs_dentry;
+#endif
+
/*
* true when this MMU needs to be unmapped before being used for a new
* purpose.
@@ -236,15 +247,25 @@ struct kvm_arch_memory_slot {
struct kvm_smccc_features {
unsigned long std_bmap;
unsigned long std_hyp_bmap;
- unsigned long vendor_hyp_bmap;
+ unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
+ unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
};
-typedef unsigned int pkvm_handle_t;
+typedef u16 pkvm_handle_t;
struct kvm_protected_vm {
pkvm_handle_t handle;
struct kvm_hyp_memcache teardown_mc;
- bool enabled;
+ struct kvm_hyp_memcache stage2_teardown_mc;
+ bool is_protected;
+ bool is_created;
+
+ /*
+ * True when the guest is being torn down. When in this state, the
+ * guest's vCPUs can't be loaded anymore, but its pages can be
+ * reclaimed by the host.
+ */
+ bool is_dying;
};
struct kvm_mpidr_data {
@@ -266,11 +287,20 @@ struct kvm_sysreg_masks;
enum fgt_group_id {
__NO_FGT_GROUP__,
- HFGxTR_GROUP,
+ HFGRTR_GROUP,
+ HFGWTR_GROUP = HFGRTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP = HDFGRTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,
+ HFGRTR2_GROUP,
+ HFGWTR2_GROUP = HFGRTR2_GROUP,
+ HDFGRTR2_GROUP,
+ HDFGWTR2_GROUP = HDFGRTR2_GROUP,
+ HFGITR2_GROUP,
+ ICH_HFGRTR_GROUP,
+ ICH_HFGWTR_GROUP = ICH_HFGRTR_GROUP,
+ ICH_HFGITR_GROUP,
/* Must be last */
__NR_FGT_GROUP_IDS__
@@ -331,6 +361,12 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
/* Fine-Grained UNDEF initialised */
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
+ /* SVE exposed to guest */
+#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
+ /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
+ /* Unhandled SEAs are taken to userspace */
+#define KVM_ARCH_FLAG_EXIT_SEA 11
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -348,11 +384,8 @@ struct kvm_arch {
cpumask_var_t supported_cpus;
- /* PMCR_EL0.N value for the guest */
- u8 pmcr_n;
-
- /* Iterator for idreg debugfs */
- u8 idreg_debugfs_iter;
+ /* Maximum number of counters for the guest */
+ u8 nr_pmu_counters;
/* Hypercall features firmware registers' descriptor */
struct kvm_smccc_features smccc_feat;
@@ -370,16 +403,27 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
+ u64 midr_el1;
+ u64 revidr_el1;
+ u64 aidr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
struct kvm_sysreg_masks *sysreg_masks;
+ /* Count the number of VNCR_EL2 currently mapped */
+ atomic_t vncr_map_count;
+
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
*/
struct kvm_protected_vm pkvm;
+
+#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
+ /* Nested virtualization info */
+ struct dentry *debugfs_nv_dentry;
+#endif
};
struct kvm_vcpu_fault_info {
@@ -467,7 +511,6 @@ enum vcpu_sysreg {
DBGVCR32_EL2, /* Debug Vector Catch Register */
/* EL2 registers */
- SCTLR_EL2, /* System Control Register (EL2) */
ACTLR_EL2, /* Auxiliary Control Register (EL2) */
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
HACR_EL2, /* Hypervisor Auxiliary Control Register */
@@ -490,7 +533,6 @@ enum vcpu_sysreg {
VBAR_EL2, /* Vector Base Address Register (EL2) */
RVBAR_EL2, /* Reset Vector Base Address Register */
CONTEXTIDR_EL2, /* Context ID Register (EL2) */
- CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
CNTHP_CTL_EL2,
CNTHP_CVAL_EL2,
@@ -499,8 +541,11 @@ enum vcpu_sysreg {
/* Anything from this can be RES0/RES1 sanitised */
MARKER(__SANITISED_REG_START__),
+ SCTLR_EL2, /* System Control Register (EL2) */
TCR2_EL2, /* Extended Translation Control Register (EL2) */
+ SCTLR2_EL2, /* System Control Register 2 (EL2) */
MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
+ CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
/* Any VNCR-capable reg goes after this point */
MARKER(__VNCR_START__),
@@ -513,6 +558,7 @@ enum vcpu_sysreg {
VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
VNCR(TCR_EL1), /* Translation Control Register */
VNCR(TCR2_EL1), /* Extended Translation Control Register */
+ VNCR(SCTLR2_EL1), /* System Control Register 2 */
VNCR(ESR_EL1), /* Exception Syndrome Register */
VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
@@ -541,12 +587,23 @@ enum vcpu_sysreg {
VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
+ /* FEAT_RAS registers */
+ VNCR(VDISR_EL2),
+ VNCR(VSESR_EL2),
+
VNCR(HFGRTR_EL2),
VNCR(HFGWTR_EL2),
VNCR(HFGITR_EL2),
VNCR(HDFGRTR_EL2),
VNCR(HDFGWTR_EL2),
VNCR(HAFGRTR_EL2),
+ VNCR(HFGRTR2_EL2),
+ VNCR(HFGWTR2_EL2),
+ VNCR(HFGITR2_EL2),
+ VNCR(HDFGRTR2_EL2),
+ VNCR(HDFGWTR2_EL2),
+
+ VNCR(VNCR_EL2),
VNCR(CNTVOFF_EL2),
VNCR(CNTV_CVAL_EL0),
@@ -554,18 +611,112 @@ enum vcpu_sysreg {
VNCR(CNTP_CVAL_EL0),
VNCR(CNTP_CTL_EL0),
+ VNCR(ICH_LR0_EL2),
+ VNCR(ICH_LR1_EL2),
+ VNCR(ICH_LR2_EL2),
+ VNCR(ICH_LR3_EL2),
+ VNCR(ICH_LR4_EL2),
+ VNCR(ICH_LR5_EL2),
+ VNCR(ICH_LR6_EL2),
+ VNCR(ICH_LR7_EL2),
+ VNCR(ICH_LR8_EL2),
+ VNCR(ICH_LR9_EL2),
+ VNCR(ICH_LR10_EL2),
+ VNCR(ICH_LR11_EL2),
+ VNCR(ICH_LR12_EL2),
+ VNCR(ICH_LR13_EL2),
+ VNCR(ICH_LR14_EL2),
+ VNCR(ICH_LR15_EL2),
+
+ VNCR(ICH_AP0R0_EL2),
+ VNCR(ICH_AP0R1_EL2),
+ VNCR(ICH_AP0R2_EL2),
+ VNCR(ICH_AP0R3_EL2),
+ VNCR(ICH_AP1R0_EL2),
+ VNCR(ICH_AP1R1_EL2),
+ VNCR(ICH_AP1R2_EL2),
+ VNCR(ICH_AP1R3_EL2),
VNCR(ICH_HCR_EL2),
+ VNCR(ICH_VMCR_EL2),
+
+ VNCR(ICH_HFGRTR_EL2),
+ VNCR(ICH_HFGWTR_EL2),
+ VNCR(ICH_HFGITR_EL2),
NR_SYS_REGS /* Nothing after this line! */
};
+struct resx {
+ u64 res0;
+ u64 res1;
+};
+
struct kvm_sysreg_masks {
- struct {
- u64 res0;
- u64 res1;
- } mask[NR_SYS_REGS - __SANITISED_REG_START__];
+ struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
+static inline struct resx __kvm_get_sysreg_resx(struct kvm_arch *arch,
+ enum vcpu_sysreg sr)
+{
+ struct kvm_sysreg_masks *masks;
+
+ masks = arch->sysreg_masks;
+ if (likely(masks &&
+ sr >= __SANITISED_REG_START__ && sr < NR_SYS_REGS))
+ return masks->mask[sr - __SANITISED_REG_START__];
+
+ return (struct resx){};
+}
+
+#define kvm_get_sysreg_resx(k, sr) __kvm_get_sysreg_resx(&(k)->arch, (sr))
+
+static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch,
+ enum vcpu_sysreg sr, struct resx resx)
+{
+ arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx;
+}
+
+#define kvm_set_sysreg_resx(k, sr, resx) \
+ __kvm_set_sysreg_resx(&(k)->arch, (sr), (resx))
+
+struct fgt_masks {
+ const char *str;
+ u64 mask;
+ u64 nmask;
+ u64 res0;
+ u64 res1;
+};
+
+extern struct fgt_masks hfgrtr_masks;
+extern struct fgt_masks hfgwtr_masks;
+extern struct fgt_masks hfgitr_masks;
+extern struct fgt_masks hdfgrtr_masks;
+extern struct fgt_masks hdfgwtr_masks;
+extern struct fgt_masks hafgrtr_masks;
+extern struct fgt_masks hfgrtr2_masks;
+extern struct fgt_masks hfgwtr2_masks;
+extern struct fgt_masks hfgitr2_masks;
+extern struct fgt_masks hdfgrtr2_masks;
+extern struct fgt_masks hdfgwtr2_masks;
+extern struct fgt_masks ich_hfgrtr_masks;
+extern struct fgt_masks ich_hfgwtr_masks;
+extern struct fgt_masks ich_hfgitr_masks;
+
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(ich_hfgitr_masks);
+
struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -610,26 +761,25 @@ struct cpu_sve_state {
* field.
*/
struct kvm_host_data {
+#define KVM_HOST_DATA_FLAG_HAS_SPE 0
+#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
+#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 2
+#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 3
+#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 4
+#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 5
+#define KVM_HOST_DATA_FLAG_HAS_BRBE 6
+ unsigned long flags;
+
struct kvm_cpu_context host_ctxt;
/*
- * All pointers in this union are hyp VA.
+ * Hyp VA.
* sve_state is only used in pKVM and if system_supports_sve().
*/
- union {
- struct user_fpsimd_state *fpsimd_state;
- struct cpu_sve_state *sve_state;
- };
-
- union {
- /* HYP VA pointer to the host storage for FPMR */
- u64 *fpmr_ptr;
- /*
- * Used by pKVM only, as it needs to provide storage
- * for the host
- */
- u64 fpmr;
- };
+ struct cpu_sve_state *sve_state;
+
+ /* Used by pKVM only. */
+ u64 fpmr;
/* Ownership of the FP regs */
enum {
@@ -642,16 +792,40 @@ struct kvm_host_data {
* host_debug_state contains the host registers which are
* saved and restored during world switches.
*/
- struct {
+ struct {
/* {Break,watch}point registers */
struct kvm_guest_debug_arch regs;
/* Statistical profiling extension */
u64 pmscr_el1;
+ u64 pmblimitr_el1;
/* Self-hosted trace */
u64 trfcr_el1;
+ u64 trblimitr_el1;
/* Values of trap registers for the host before guest entry. */
u64 mdcr_el2;
+ u64 brbcr_el1;
} host_debug_state;
+
+ /* Guest trace filter value */
+ u64 trfcr_while_in_guest;
+
+ /* Number of programmable event counters (PMCR_EL0.N) for this CPU */
+ unsigned int nr_event_counters;
+
+ /* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
+ unsigned int debug_brps;
+ unsigned int debug_wrps;
+
+ /* Last vgic_irq part of the AP list recorded in an LR */
+ struct vgic_irq *last_lr_irq;
+
+ /* PPI state tracking for GICv5-based guests */
+ struct {
+ DECLARE_BITMAP(pendr, VGIC_V5_NR_PRIVATE_IRQS);
+
+ /* The saved state of the regs when leaving the guest */
+ DECLARE_BITMAP(activer_exit, VGIC_V5_NR_PRIVATE_IRQS);
+ } vgic_v5_ppi_state;
};
struct kvm_host_psci_config {
@@ -684,6 +858,8 @@ struct vcpu_reset_state {
bool reset;
};
+struct vncr_tlb;
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -708,7 +884,11 @@ struct kvm_vcpu_arch {
u64 hcr_el2;
u64 hcrx_el2;
u64 mdcr_el2;
- u64 cptr_el2;
+
+ struct {
+ u64 r;
+ u64 w;
+ } fgt[__NR_FGT_GROUP_IDS__];
/* Exception Information */
struct kvm_vcpu_fault_info fault;
@@ -720,7 +900,7 @@ struct kvm_vcpu_arch {
u8 iflags;
/* State flags for kernel bookkeeping, unused by the hypervisor code */
- u8 sflags;
+ u16 sflags;
/*
* Don't run the guest (internal implementation need).
@@ -739,31 +919,22 @@ struct kvm_vcpu_arch {
*
* external_debug_state contains the debug values we want to debug the
* guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
- *
- * debug_ptr points to the set of debug registers that should be loaded
- * onto the hardware when running the guest.
*/
- struct kvm_guest_debug_arch *debug_ptr;
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
+ u64 external_mdscr_el1;
+
+ enum {
+ VCPU_DEBUG_FREE,
+ VCPU_DEBUG_HOST_OWNED,
+ VCPU_DEBUG_GUEST_OWNED,
+ } debug_owner;
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
struct kvm_pmu pmu;
- /*
- * Guest registers we preserve during guest debugging.
- *
- * These shadow registers are updated by the kvm_handle_sys_reg
- * trap handler if the guest accesses or updates them while we
- * are using guest debug.
- */
- struct {
- u32 mdscr_el1;
- bool pstate_ss;
- } guest_debug_preserved;
-
/* vcpu power state */
struct kvm_mp_state mp_state;
spinlock_t mp_state_lock;
@@ -771,6 +942,9 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
+ /* Pages to top-up the pKVM/EL2 guest pool */
+ struct kvm_hyp_memcache pkvm_memcache;
+
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
@@ -785,6 +959,12 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */
u32 *ccsidr;
+
+ /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */
+ struct vncr_tlb *vncr_tlb;
+
+ /* Hyp-readable copy of kvm_vcpu::pid */
+ pid_t pid;
};
/*
@@ -859,18 +1039,28 @@ struct kvm_vcpu_arch {
__vcpu_flags_preempt_enable(); \
} while (0)
+#define __vcpu_test_and_clear_flag(v, flagset, f, m) \
+ ({ \
+ typeof(v->arch.flagset) set; \
+ \
+ set = __vcpu_get_flag(v, flagset, f, m); \
+ __vcpu_clear_flag(v, flagset, f, m); \
+ \
+ set; \
+ })
+
#define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
#define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
#define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_test_and_clear_flag(v, ...) \
+ __vcpu_test_and_clear_flag((v), __VA_ARGS__)
-/* SVE exposed to guest */
-#define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
-/* PTRAUTH exposed to guest */
-#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
-/* KVM_ARM_VCPU_INIT completed */
-#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
+/* pKVM VCPU setup completed */
+#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -906,29 +1096,25 @@ struct kvm_vcpu_arch {
#define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
#define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
#define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
-/* Guest debug is live */
-#define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4))
-/* Save SPE context if active */
-#define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
-/* Save TRBE context if active */
-#define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
-
-/* SVE enabled for host EL0 */
-#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
-/* SME enabled for EL0 */
-#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
+
/* Physical CPU not in supported_cpus */
-#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
+#define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(0))
/* WFIT instruction trapped */
-#define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
+#define IN_WFIT __vcpu_single_flag(sflags, BIT(1))
/* vcpu system registers loaded on physical CPU */
-#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
-/* Software step state is Active-pending */
-#define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))
+#define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(2))
+/* Software step state is Active-pending for external debug */
+#define HOST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(3))
+/* Software step state is Active pending for guest debug */
+#define GUEST_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(4))
/* PMUSERENR for the guest EL0 is on physical CPU */
-#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6))
+#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
-#define IN_WFI __vcpu_single_flag(sflags, BIT(7))
+#define IN_WFI __vcpu_single_flag(sflags, BIT(6))
+/* KVM is currently emulating a nested ERET */
+#define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7))
+/* SError pending for nested guest */
+#define NESTED_SERROR_PENDING __vcpu_single_flag(sflags, BIT(8))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -940,33 +1126,42 @@ struct kvm_vcpu_arch {
#define vcpu_sve_zcr_elx(vcpu) \
(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
-#define vcpu_sve_state_size(vcpu) ({ \
+#define sve_state_size_from_vl(sve_max_vl) ({ \
size_t __size_ret; \
- unsigned int __vcpu_vq; \
+ unsigned int __vq; \
\
- if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
__size_ret = 0; \
} else { \
- __vcpu_vq = vcpu_sve_max_vq(vcpu); \
- __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ __vq = sve_vq_from_vl(sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vq); \
} \
\
__size_ret; \
})
+#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
+
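sve_state_size_from_vl() converts a maximum vector length into the number of quadwords (vq) and from there into a register-image size. A hedged sketch of the vq derivation and the dominant Z-register contribution (the real SVE_SIG_REGS_SIZE() also covers the predicate registers and FFR; the vector length below is just an example):

#include <stdio.h>

#define SVE_VQ_BYTES	16	/* one quadword = 128 bits */

/* vq: the vector length expressed in 128-bit quadwords. */
static unsigned int sve_vq_from_vl(unsigned int vl_bytes)
{
	return vl_bytes / SVE_VQ_BYTES;
}

int main(void)
{
	unsigned int vl = 256;			/* assumed 2048-bit vectors */
	unsigned int vq = sve_vq_from_vl(vl);

	/* 32 Z registers of vq quadwords each; P registers and FFR not counted here. */
	printf("vq = %u, Z-register state = %u bytes\n", vq, 32 * vq * SVE_VQ_BYTES);
	return 0;
}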
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
-#define vcpu_has_sve(vcpu) (system_supports_sve() && \
- vcpu_get_flag(vcpu, GUEST_HAS_SVE))
+#define kvm_has_sve(kvm) (system_supports_sve() && \
+ test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags))
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#define vcpu_has_sve(vcpu) kvm_has_sve(kern_hyp_va((vcpu)->kvm))
+#else
+#define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu) \
((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
- vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
+ (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || \
+ vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
#else
#define vcpu_has_ptrauth(vcpu) false
#endif
@@ -1012,118 +1207,39 @@ static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *, enum vcpu_sysreg, u64);
-#define __vcpu_sys_reg(v,r) \
- (*({ \
+
+#define __vcpu_assign_sys_reg(v, r, val) \
+ do { \
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
- u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
+ u64 __v = (val); \
if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
- *__r = kvm_vcpu_apply_reg_masks((v), (r), *__r);\
- __r; \
- }))
-
-u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
-void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
-
-static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
-{
- /*
- * *** VHE ONLY ***
- *
- * System registers listed in the switch are not saved on every
- * exit from the guest but are only saved on vcpu_put.
- *
- * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
- * should never be listed below, because the guest cannot modify its
- * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
- * thread when emulating cross-VCPU communication.
- */
- if (!has_vhe())
- return false;
-
- switch (reg) {
- case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
- case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
- case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
- case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
- case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
- case TCR2_EL1: *val = read_sysreg_s(SYS_TCR2_EL12); break;
- case PIR_EL1: *val = read_sysreg_s(SYS_PIR_EL12); break;
- case PIRE0_EL1: *val = read_sysreg_s(SYS_PIRE0_EL12); break;
- case POR_EL1: *val = read_sysreg_s(SYS_POR_EL12); break;
- case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
- case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
- case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
- case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break;
- case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break;
- case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break;
- case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
- case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
- case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
- case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
- case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
- case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
- case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
- case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break;
- case PAR_EL1: *val = read_sysreg_par(); break;
- case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
- case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
- case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
- case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
- default: return false;
- }
-
- return true;
-}
+ __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+ \
+ ctxt_sys_reg(ctxt, (r)) = __v; \
+ } while (0)
-static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
-{
- /*
- * *** VHE ONLY ***
- *
- * System registers listed in the switch are not restored on every
- * entry to the guest but are only restored on vcpu_load.
- *
- * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
- * should never be listed below, because the MPIDR should only be set
- * once, before running the VCPU, and never changed later.
- */
- if (!has_vhe())
- return false;
+#define __vcpu_rmw_sys_reg(v, r, op, val) \
+ do { \
+ const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+ u64 __v = ctxt_sys_reg(ctxt, (r)); \
+ __v op (val); \
+ if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+ __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+ \
+ ctxt_sys_reg(ctxt, (r)) = __v; \
+ } while (0)
- switch (reg) {
- case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
- case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
- case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
- case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
- case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
- case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
- case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
- case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
- case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
- case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
- case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
- case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
- case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
- case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
- case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
- case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
- case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
- case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
- case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
- case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
- case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
- case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
- case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
- case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
- case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
- case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
- case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
- case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
- default: return false;
- }
+#define __vcpu_sys_reg(v,r) \
+ ({ \
+ const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
+ u64 __v = ctxt_sys_reg(ctxt, (r)); \
+ if (vcpu_has_nv((v)) && (r) >= __SANITISED_REG_START__) \
+ __v = kvm_vcpu_apply_reg_masks((v), (r), __v); \
+ __v; \
+ })
- return true;
-}
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
+void vcpu_write_sys_reg(struct kvm_vcpu *, u64, enum vcpu_sysreg);
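As a minimal usage sketch of the accessors introduced above (not part of the patch; SCTLR_EL1_M is assumed to be the usual MMU-enable bit definition), the three calls below simply demonstrate the read, read-modify-write and out-of-line write paths:

static inline void example_sctlr_accessors(struct kvm_vcpu *vcpu)
{
	/* Read through the accessor so any NV sanitisation masks apply. */
	u64 val = __vcpu_sys_reg(vcpu, SCTLR_EL1);

	/* In-place read-modify-write, with the same masking applied. */
	__vcpu_rmw_sys_reg(vcpu, SCTLR_EL1, |=, SCTLR_EL1_M);

	/* Out-of-line variant for registers that may be loaded on hardware. */
	vcpu_write_sys_reg(vcpu, val | SCTLR_EL1_M, SCTLR_EL1);
}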
struct kvm_vm_stat {
struct kvm_vm_stat_generic generic;
@@ -1172,9 +1288,8 @@ void kvm_arm_resume_guest(struct kvm *kvm);
})
/*
- * The couple of isb() below are there to guarantee the same behaviour
- * on VHE as on !VHE, where the eret to EL1 acts as a context
- * synchronization event.
+ * The isb() below is there to guarantee the same behaviour on VHE as on !VHE,
+ * where the eret to EL1 acts as a context synchronization event.
*/
#define kvm_call_hyp(f, ...) \
do { \
@@ -1192,7 +1307,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
\
if (has_vhe()) { \
ret = f(__VA_ARGS__); \
- isb(); \
} else { \
ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
} \
@@ -1225,9 +1339,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx);
int __init populate_nv_trap_config(void);
-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
/* MMIO helpers */
@@ -1262,7 +1373,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
extern unsigned int __ro_after_init kvm_arm_vmid_bits;
int __init kvm_arm_vmid_alloc_init(void);
void __init kvm_arm_vmid_alloc_free(void);
-bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
+void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
@@ -1275,8 +1386,6 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
return (vcpu_arch->steal.base != INVALID_GPA);
}
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
-
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
@@ -1307,6 +1416,13 @@ DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
&this_cpu_ptr_hyp_sym(kvm_host_data)->f)
#endif
+#define host_data_test_flag(flag) \
+ (test_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags)))
+#define host_data_set_flag(flag) \
+ set_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
+#define host_data_clear_flag(flag) \
+ clear_bit(KVM_HOST_DATA_FLAG_##flag, host_data_ptr(flags))
+
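The three wrappers above give a flag-name interface over the per-CPU host_data flags word. A small sketch of how a caller might use them (illustrative only; the HOST_SVE_ENABLED flag name is an assumption for the example):

static inline void example_track_host_sve(bool enabled)
{
	if (enabled && !host_data_test_flag(HOST_SVE_ENABLED))
		host_data_set_flag(HOST_SVE_ENABLED);
	else if (!enabled)
		host_data_clear_flag(HOST_SVE_ENABLED);
}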
/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(void)
{
@@ -1330,17 +1446,23 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-
-void kvm_arm_init_debug(void);
-void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
-void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+void kvm_init_host_debug_data(void);
+void kvm_debug_init_vhe(void);
+void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
+void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
+void kvm_debug_set_guest_ownership(struct kvm_vcpu *vcpu);
+void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val);
#define kvm_vcpu_os_lock_enabled(vcpu) \
(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
+#define kvm_debug_regs_in_use(vcpu) \
+ ((vcpu)->arch.debug_owner != VCPU_DEBUG_FREE)
+#define kvm_host_owns_debug_regs(vcpu) \
+ ((vcpu)->arch.debug_owner == VCPU_DEBUG_HOST_OWNED)
+#define kvm_guest_owns_debug_regs(vcpu) \
+ ((vcpu)->arch.debug_owner == VCPU_DEBUG_GUEST_OWNED)
+
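The ownership macros let the load/put paths branch on who currently owns the breakpoint/watchpoint state. A rough sketch of a caller, for illustration only:

static inline void example_put_debug_state(struct kvm_vcpu *vcpu)
{
	if (!kvm_debug_regs_in_use(vcpu))
		return;				/* neither side owns the registers */

	if (kvm_guest_owns_debug_regs(vcpu))
		kvm_vcpu_put_debug(vcpu);	/* declared above */
}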
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
@@ -1356,7 +1478,6 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
struct reg_mask_range *range);
/* Guest/host FPSIMD coordination helpers */
-int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
@@ -1367,14 +1488,13 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
return (!has_vhe() && attr->exclude_host);
}
-/* Flags for host debug state */
-void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
-
#ifdef CONFIG_KVM
void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u64 clr);
bool kvm_set_pmuserenr(u64 val);
+void kvm_enable_trbe(void);
+void kvm_disable_trbe(void);
+void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest);
#else
static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1382,6 +1502,9 @@ static inline bool kvm_set_pmuserenr(u64 val)
{
return false;
}
+static inline void kvm_enable_trbe(void) {}
+static inline void kvm_disable_trbe(void) {}
+static inline void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest) {}
#endif
void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
@@ -1397,7 +1520,7 @@ struct kvm *kvm_arch_alloc_vm(void);
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
-#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
+#define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.is_protected)
#define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
@@ -1422,6 +1545,7 @@ static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
return test_bit(feature, ka->vcpu_features);
}
+#define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
#define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
@@ -1445,6 +1569,12 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
return &ka->id_regs[IDREG_IDX(reg)];
case SYS_CTR_EL0:
return &ka->ctr_el0;
+ case SYS_MIDR_EL1:
+ return &ka->midr_el1;
+ case SYS_REVIDR_EL1:
+ return &ka->revidr_el1;
+ case SYS_AIDR_EL1:
+ return &ka->aidr_el1;
default:
WARN_ON_ONCE(1);
return NULL;
@@ -1491,12 +1621,16 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
-#define kvm_has_feat(kvm, id, fld, limit) \
+#define __kvm_has_feat(kvm, id, fld, limit) \
kvm_cmp_feat(kvm, id, fld, >=, limit)
-#define kvm_has_feat_enum(kvm, id, fld, val) \
+#define kvm_has_feat(kvm, ...) __kvm_has_feat(kvm, __VA_ARGS__)
+
+#define __kvm_has_feat_enum(kvm, id, fld, val) \
kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
+#define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__)
+
#define kvm_has_feat_range(kvm, id, fld, min, max) \
(kvm_cmp_feat(kvm, id, fld, >=, min) && \
kvm_cmp_feat(kvm, id, fld, <=, max))
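Making kvm_has_feat() and kvm_has_feat_enum() variadic adds one extra expansion step, presumably so that a single convenience macro can stand in for the whole "id, fld, limit" triplet. A sketch under that assumption (FEAT_S1PIE below is a made-up alias for the example):

#define FEAT_S1PIE	ID_AA64MMFR3_EL1, S1PIE, IMP

static inline bool example_has_s1pie(struct kvm *kvm)
{
	return kvm_has_feat(kvm, FEAT_S1PIE);	/* the alias expands to the triplet */
}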
@@ -1527,6 +1661,76 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1PIE, IMP))
#define kvm_has_s1poe(k) \
- (kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+ (system_supports_poe() && \
+ kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+
+#define kvm_has_ras(k) \
+ (kvm_has_feat((k), ID_AA64PFR0_EL1, RAS, IMP))
+
+#define kvm_has_sctlr2(k) \
+ (kvm_has_feat((k), ID_AA64MMFR3_EL1, SCTLRX, IMP))
+
+static inline bool kvm_arch_has_irq_bypass(void)
+{
+ return true;
+}
+
+void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
+struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg);
+void check_feature_map(void);
+void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu);
+
+static __always_inline enum fgt_group_id __fgt_reg_to_group_id(enum vcpu_sysreg reg)
+{
+ switch (reg) {
+ case HFGRTR_EL2:
+ case HFGWTR_EL2:
+ return HFGRTR_GROUP;
+ case HFGITR_EL2:
+ return HFGITR_GROUP;
+ case HDFGRTR_EL2:
+ case HDFGWTR_EL2:
+ return HDFGRTR_GROUP;
+ case HAFGRTR_EL2:
+ return HAFGRTR_GROUP;
+ case HFGRTR2_EL2:
+ case HFGWTR2_EL2:
+ return HFGRTR2_GROUP;
+ case HFGITR2_EL2:
+ return HFGITR2_GROUP;
+ case HDFGRTR2_EL2:
+ case HDFGWTR2_EL2:
+ return HDFGRTR2_GROUP;
+ case ICH_HFGRTR_EL2:
+ case ICH_HFGWTR_EL2:
+ return ICH_HFGRTR_GROUP;
+ case ICH_HFGITR_EL2:
+ return ICH_HFGITR_GROUP;
+ default:
+ BUILD_BUG_ON(1);
+ }
+}
+
+#define vcpu_fgt(vcpu, reg) \
+ ({ \
+ enum fgt_group_id id = __fgt_reg_to_group_id(reg); \
+ u64 *p; \
+ switch (reg) { \
+ case HFGWTR_EL2: \
+ case HDFGWTR_EL2: \
+ case HFGWTR2_EL2: \
+ case HDFGWTR2_EL2: \
+ case ICH_HFGWTR_EL2: \
+ p = &(vcpu)->arch.fgt[id].w; \
+ break; \
+ default: \
+ p = &(vcpu)->arch.fgt[id].r; \
+ break; \
+ } \
+ \
+ p; \
+ })
+
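vcpu_fgt() hides the read/write split of the fine-grained trap state: the write-side registers resolve to the .w word, everything else to .r. A hedged example of setting a write-trap bit through it (HFGWTR_EL2_TTBR0_EL1 is assumed to be the generated bit name and is used for illustration only):

static inline void example_trap_ttbr0_writes(struct kvm_vcpu *vcpu)
{
	u64 *fgt = vcpu_fgt(vcpu, HFGWTR_EL2);	/* selects the .w word */

	*fgt |= HFGWTR_EL2_TTBR0_EL1;
}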
+long kvm_get_cap_for_kvm_ioctl(unsigned int ioctl, long *ext);
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c838309e4ec4..8d06b62e7188 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -76,14 +76,26 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
+u64 __gic_v3_get_lr(unsigned int lr);
+void __gic_v3_set_lr(u64 val, int lr);
+
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
-void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
+/* GICv5 */
+void __vgic_v5_save_apr(struct vgic_v5_cpu_if *cpu_if);
+void __vgic_v5_restore_vmcr_apr(struct vgic_v5_cpu_if *cpu_if);
+/* No hypercalls for the following */
+void __vgic_v5_save_ppi_state(struct vgic_v5_cpu_if *cpu_if);
+void __vgic_v5_restore_ppi_state(struct vgic_v5_cpu_if *cpu_if);
+void __vgic_v5_save_state(struct vgic_v5_cpu_if *cpu_if);
+void __vgic_v5_restore_state(struct vgic_v5_cpu_if *cpu_if);
+
#ifdef __KVM_NVHE_HYPERVISOR__
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
@@ -126,13 +138,13 @@ void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
#ifdef __KVM_NVHE_HYPERVISOR__
void __pkvm_init_switch_pgd(phys_addr_t pgd, unsigned long sp,
void (*fn)(void));
-int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
- unsigned long *per_cpu_base, u32 hyp_va_bits);
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long *per_cpu_base, u32 hyp_va_bits);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64pfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
@@ -144,5 +156,6 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
+extern unsigned long kvm_nvhe_sym(hyp_nr_cpus);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_hypevents.h b/arch/arm64/include/asm/kvm_hypevents.h
new file mode 100644
index 000000000000..743c49bd878f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hypevents.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(__ARM64_KVM_HYPEVENTS_H_) || defined(HYP_EVENT_MULTI_READ)
+#define __ARM64_KVM_HYPEVENTS_H_
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/trace.h>
+#endif
+
+#ifndef __HYP_ENTER_EXIT_REASON
+#define __HYP_ENTER_EXIT_REASON
+enum hyp_enter_exit_reason {
+ HYP_REASON_SMC,
+ HYP_REASON_HVC,
+ HYP_REASON_PSCI,
+ HYP_REASON_HOST_ABORT,
+ HYP_REASON_GUEST_EXIT,
+ HYP_REASON_ERET_HOST,
+ HYP_REASON_ERET_GUEST,
+ HYP_REASON_UNKNOWN /* Must be last */
+};
+#endif
+
+HYP_EVENT(hyp_enter,
+ HE_PROTO(struct kvm_cpu_context *host_ctxt, u8 reason),
+ HE_STRUCT(
+ he_field(u8, reason)
+ he_field(pid_t, vcpu)
+ ),
+ HE_ASSIGN(
+ __entry->reason = reason;
+ __entry->vcpu = __tracing_get_vcpu_pid(host_ctxt);
+ ),
+ HE_PRINTK("reason=%s vcpu=%d", __hyp_enter_exit_reason_str(__entry->reason), __entry->vcpu)
+);
+
+HYP_EVENT(hyp_exit,
+ HE_PROTO(struct kvm_cpu_context *host_ctxt, u8 reason),
+ HE_STRUCT(
+ he_field(u8, reason)
+ he_field(pid_t, vcpu)
+ ),
+ HE_ASSIGN(
+ __entry->reason = reason;
+ __entry->vcpu = __tracing_get_vcpu_pid(host_ctxt);
+ ),
+ HE_PRINTK("reason=%s vcpu=%d", __hyp_enter_exit_reason_str(__entry->reason), __entry->vcpu)
+);
+
+HYP_EVENT(selftest,
+ HE_PROTO(u64 id),
+ HE_STRUCT(
+ he_field(u64, id)
+ ),
+ HE_ASSIGN(
+ __entry->id = id;
+ ),
+	HE_PRINTK("id=%llu", __entry->id)
+);
+#endif
diff --git a/arch/arm64/include/asm/kvm_hyptrace.h b/arch/arm64/include/asm/kvm_hyptrace.h
new file mode 100644
index 000000000000..de133b735f72
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyptrace.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ARM64_KVM_HYPTRACE_H_
+#define __ARM64_KVM_HYPTRACE_H_
+
+#include <linux/ring_buffer.h>
+
+struct hyp_trace_desc {
+ unsigned long bpages_backing_start;
+ size_t bpages_backing_size;
+ struct trace_buffer_desc trace_buffer_desc;
+
+};
+
+struct hyp_event_id {
+ unsigned short id;
+ atomic_t enabled;
+};
+
+extern struct remote_event __hyp_events_start[];
+extern struct remote_event __hyp_events_end[];
+
+/* hyp_event section used by the hypervisor */
+extern struct hyp_event_id __hyp_event_ids_start[];
+extern struct hyp_event_id __hyp_event_ids_end[];
+
+#endif
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 66d93e320ec8..01e9c72d6aa7 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -49,7 +49,7 @@
* mappings, and none of this applies in that case.
*/
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/alternative.h>
@@ -103,6 +103,7 @@ alternative_cb_end
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
+u32 kvm_hyp_va_bits(void);
void kvm_apply_hyp_relocations(void);
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
@@ -139,6 +140,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
+extern u32 __hyp_va_bits;
+
/*
* We currently support using a VM-specified IPA size. For backward
* compatibility, the default IPA size is fixed to 40bits.
@@ -178,11 +181,12 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
phys_addr_t pa, unsigned long size, bool writable);
+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-int __init kvm_mmu_init(u32 *hyp_va_bits);
+int __init kvm_mmu_init(u32 hyp_va_bits);
static inline void *__kvm_vector_slot2addr(void *base,
enum arm64_hyp_spectre_vector slot)
@@ -353,11 +357,49 @@ static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
return &kvm->arch.mmu != mmu;
}
+static inline void kvm_fault_lock(struct kvm *kvm)
+{
+ if (is_protected_kvm_enabled())
+ write_lock(&kvm->mmu_lock);
+ else
+ read_lock(&kvm->mmu_lock);
+}
+
+static inline void kvm_fault_unlock(struct kvm *kvm)
+{
+ if (is_protected_kvm_enabled())
+ write_unlock(&kvm->mmu_lock);
+ else
+ read_unlock(&kvm->mmu_lock);
+}
+
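kvm_fault_lock()/kvm_fault_unlock() encode the rule that pKVM serialises stage-2 faults under the write lock while everything else can fault in parallel under the read lock. The shape of a caller, sketched with a placeholder body:

static inline int example_handle_stage2_fault(struct kvm *kvm)
{
	int ret;

	kvm_fault_lock(kvm);
	ret = 0;			/* stand-in for the real fault handling */
	kvm_fault_unlock(kvm);

	return ret;
}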
+/*
+ * ARM64 KVM relies on a simple conversion from physaddr to a kernel
+ * virtual address (KVA) when it does cache maintenance as the CMO
+ * instructions work on virtual addresses. This is incompatible with
+ * VM_PFNMAP VMAs which may not have a kernel direct mapping to a
+ * virtual address.
+ *
+ * With S2FWB and CACHE DIC features, KVM need not do cache flushing
+ * and CMOs are NOP'd. This has the effect of no longer requiring a
+ * KVA for addresses mapped into the S2. The presence of these features
+ * is thus necessary to support cacheable S2 mapping of VM_PFNMAP.
+ */
+static inline bool kvm_supports_cacheable_pfnmap(void)
+{
+ return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
+ cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
+}
+
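A fault path can then use the helper as a gate before accepting a cacheable mapping of a VM_PFNMAP region; a minimal sketch with assumed parameter names:

static inline int example_check_cacheable_pfnmap(bool vma_is_pfnmap, bool cacheable)
{
	if (vma_is_pfnmap && cacheable && !kvm_supports_cacheable_pfnmap())
		return -EINVAL;		/* CMOs would need a KVA we may not have */

	return 0;
}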
#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
+void kvm_nested_s2_ptdump_create_debugfs(struct kvm_s2_mmu *mmu);
+void kvm_nested_s2_ptdump_remove_debugfs(struct kvm_s2_mmu *mmu);
#else
static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {}
+static inline void kvm_nested_s2_ptdump_create_debugfs(struct kvm_s2_mmu *mmu) {}
+static inline void kvm_nested_s2_ptdump_remove_debugfs(struct kvm_s2_mmu *mmu) {}
#endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_mte.h b/arch/arm64/include/asm/kvm_mte.h
index de002636eb1f..3171963ad25c 100644
--- a/arch/arm64/include/asm/kvm_mte.h
+++ b/arch/arm64/include/asm/kvm_mte.h
@@ -5,7 +5,7 @@
#ifndef __ASM_KVM_MTE_H
#define __ASM_KVM_MTE_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/sysreg.h>
@@ -62,5 +62,5 @@ alternative_else_nop_endif
.endm
#endif /* CONFIG_ARM64_MTE */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_KVM_MTE_H */
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 233e65522716..091544e6af44 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -33,14 +33,14 @@ static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
- u64 cpacr_el1 = CPACR_ELx_RES1;
+ u64 cpacr_el1 = CPACR_EL1_RES1;
if (cptr_el2 & CPTR_EL2_TTA)
- cpacr_el1 |= CPACR_ELx_TTA;
+ cpacr_el1 |= CPACR_EL1_TTA;
if (!(cptr_el2 & CPTR_EL2_TFP))
- cpacr_el1 |= CPACR_ELx_FPEN;
+ cpacr_el1 |= CPACR_EL1_FPEN;
if (!(cptr_el2 & CPTR_EL2_TZ))
- cpacr_el1 |= CPACR_ELx_ZEN;
+ cpacr_el1 |= CPACR_EL1_ZEN;
cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
@@ -64,6 +64,7 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
}
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
+extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
@@ -79,6 +80,10 @@ extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
+extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
+extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);
+
+extern void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu);
struct kvm_s2_trans {
phys_addr_t output;
@@ -115,9 +120,42 @@ static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
return trans->writable;
}
-static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
+static inline bool kvm_has_xnx(struct kvm *kvm)
{
- return !(trans->desc & BIT(54));
+ return cpus_have_final_cap(ARM64_HAS_XNX) &&
+ kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP);
+}
+
+static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans)
+{
+ u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+ if (!kvm_has_xnx(kvm))
+ xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10);
+
+ switch (xn) {
+ case 0b00:
+ case 0b01:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans)
+{
+ u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);
+
+ if (!kvm_has_xnx(kvm))
+ xn &= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, 0b10);
+
+ switch (xn) {
+ case 0b00:
+ case 0b11:
+ return true;
+ default:
+ return false;
+ }
}
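For readers cross-checking the two helpers above: the stage-2 XN field is now the two bits [54:53], encoded (with FEAT_XNX) as 0b00 executable at EL1 and EL0, 0b01 executable at EL0 only, 0b11 executable at EL1 only, and 0b10 executable at neither; without FEAT_XNX only bit 54 is defined, hence the masking down to 0b10 before the switch.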
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
@@ -186,7 +224,8 @@ static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
return true;
}
-int kvm_init_nv_sysregs(struct kvm *kvm);
+int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
+u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);
#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
@@ -229,7 +268,39 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
shift; \
})
-static inline unsigned int ps_to_output_size(unsigned int ps)
+static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
+{
+ u64 base, tg, num, scale;
+ int shift;
+
+ tg = FIELD_GET(GENMASK(47, 46), val);
+
+ switch(tg) {
+ case 1:
+ shift = 12;
+ break;
+ case 2:
+ shift = 14;
+ break;
+ case 3:
+ default: /* IMPDEF: handle tg==0 as 64k */
+ shift = 16;
+ break;
+ }
+
+ base = (val & GENMASK(36, 0)) << shift;
+
+ if (asid)
+ *asid = FIELD_GET(TLBIR_ASID_MASK, val);
+
+ scale = FIELD_GET(GENMASK(45, 44), val);
+ num = FIELD_GET(GENMASK(43, 39), val);
+ *range = __TLBI_RANGE_PAGES(num, scale) << shift;
+
+ return base;
+}
+
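As a worked example of the decoding above, assuming __TLBI_RANGE_PAGES() keeps its usual ((num + 1) << (5 * scale + 1)) definition from <asm/tlbflush.h>: with TG = 0b01 the granule is 4KiB so shift = 12; SCALE = 2 and NUM = 7 then give (7 + 1) << 11 = 16384 pages, so *range becomes 16384 << 12 = 64MiB, and the returned base is the 37-bit BaseADDR field shifted left by 12.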
+static inline unsigned int ps_to_output_size(unsigned int ps, bool pa52bit)
{
switch (ps) {
case 0: return 32;
@@ -237,10 +308,104 @@ static inline unsigned int ps_to_output_size(unsigned int ps)
case 2: return 40;
case 3: return 42;
case 4: return 44;
- case 5:
+ case 5: return 48;
+ case 6: if (pa52bit)
+ return 52;
+ fallthrough;
default:
return 48;
}
}
+enum trans_regime {
+ TR_EL10,
+ TR_EL20,
+ TR_EL2,
+};
+
+struct s1_walk_info;
+
+struct s1_walk_context {
+ struct s1_walk_info *wi;
+ u64 table_ipa;
+ int level;
+};
+
+struct s1_walk_filter {
+ int (*fn)(struct s1_walk_context *, void *);
+ void *priv;
+};
+
+struct s1_walk_info {
+ struct s1_walk_filter *filter;
+ u64 baddr;
+ enum trans_regime regime;
+ unsigned int max_oa_bits;
+ unsigned int pgshift;
+ unsigned int txsz;
+ int sl;
+ u8 sh;
+ bool as_el0;
+ bool hpd;
+ bool e0poe;
+ bool poe;
+ bool pan;
+ bool be;
+ bool s2;
+ bool pa52bit;
+ bool ha;
+};
+
+struct s1_walk_result {
+ union {
+ struct {
+ u64 desc;
+ u64 pa;
+ s8 level;
+ u8 APTable;
+ bool nG;
+ u16 asid;
+ bool UXNTable;
+ bool PXNTable;
+ bool uwxn;
+ bool uov;
+ bool ur;
+ bool uw;
+ bool ux;
+ bool pwxn;
+ bool pov;
+ bool pr;
+ bool pw;
+ bool px;
+ };
+ struct {
+ u8 fst;
+ bool ptw;
+ bool s2;
+ };
+ };
+ bool failed;
+};
+
+int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
+ struct s1_walk_result *wr, u64 va);
+int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa,
+ int *level);
+
+/* VNCR management */
+int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
+int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
+void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);
+
+u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);
+
+#define vncr_fixmap(c) \
+ ({ \
+ u32 __c = (c); \
+ BUG_ON(__c >= NR_CPUS); \
+ (FIX_VNCR - __c); \
+ })
+
+int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);
+
#endif /* __ARM64_KVM_NESTED_H */
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index aab04097b505..41a8687938eb 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -59,6 +59,11 @@ typedef u64 kvm_pte_t;
#define KVM_PHYS_INVALID (-1ULL)
+#define KVM_PTE_TYPE BIT(1)
+#define KVM_PTE_TYPE_BLOCK 0
+#define KVM_PTE_TYPE_PAGE 1
+#define KVM_PTE_TYPE_TABLE 1
+
#define KVM_PTE_LEAF_ATTR_LO GENMASK(11, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX GENMASK(4, 2)
@@ -83,8 +88,10 @@ typedef u64 kvm_pte_t;
#define KVM_PTE_LEAF_ATTR_HI_SW GENMASK(58, 55)
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
+#define KVM_PTE_LEAF_ATTR_HI_S1_UXN BIT(54)
+#define KVM_PTE_LEAF_ATTR_HI_S1_PXN BIT(53)
-#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
+#define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)
#define KVM_PTE_LEAF_ATTR_HI_S1_GP BIT(50)
@@ -92,14 +99,30 @@ typedef u64 kvm_pte_t;
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
KVM_PTE_LEAF_ATTR_HI_S2_XN)
-#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
-#define KVM_MAX_OWNER_ID 1
+/* pKVM invalid pte encodings */
+#define KVM_INVALID_PTE_TYPE_MASK GENMASK(63, 60)
+#define KVM_INVALID_PTE_ANNOT_MASK ~(KVM_PTE_VALID | \
+ KVM_INVALID_PTE_TYPE_MASK)
-/*
- * Used to indicate a pte for which a 'break-before-make' sequence is in
- * progress.
- */
-#define KVM_INVALID_PTE_LOCKED BIT(10)
+enum kvm_invalid_pte_type {
+ /*
+ * Used to indicate a pte for which a 'break-before-make'
+ * sequence is in progress.
+ */
+ KVM_INVALID_PTE_TYPE_LOCKED = 1,
+
+ /*
+ * pKVM has unmapped the page from the host due to a change of
+ * ownership.
+ */
+ KVM_HOST_INVALID_PTE_TYPE_DONATION,
+
+ /*
+ * The page has been forcefully reclaimed from the guest by the
+ * host.
+ */
+ KVM_GUEST_INVALID_PTE_TYPE_POISONED,
+};
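A sketch of how an invalid, annotated PTE might be composed under this encoding (illustrative only, using FIELD_PREP() from <linux/bitfield.h>; the helper is not part of the patch):

static inline kvm_pte_t example_make_annotation(enum kvm_invalid_pte_type type,
						kvm_pte_t annotation)
{
	/* annotation[0] and annotation[63:60] are expected to be zero. */
	return FIELD_PREP(KVM_INVALID_PTE_TYPE_MASK, type) |
	       (annotation & KVM_INVALID_PTE_ANNOT_MASK);
}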
static inline bool kvm_pte_valid(kvm_pte_t pte)
{
@@ -224,18 +247,19 @@ struct kvm_pgtable_mm_ops {
/**
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
- * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
- * ARM64_HAS_STAGE2_FWB.
* @KVM_PGTABLE_S2_IDMAP: Only use identity mappings.
+ * @KVM_PGTABLE_S2_AS_S1:	Final memory attributes are those of Stage-1.
*/
enum kvm_pgtable_stage2_flags {
- KVM_PGTABLE_S2_NOFWB = BIT(0),
- KVM_PGTABLE_S2_IDMAP = BIT(1),
+ KVM_PGTABLE_S2_IDMAP = BIT(0),
+ KVM_PGTABLE_S2_AS_S1 = BIT(1),
};
/**
* enum kvm_pgtable_prot - Page-table permissions and attributes.
- * @KVM_PGTABLE_PROT_X: Execute permission.
+ * @KVM_PGTABLE_PROT_UX: Unprivileged execute permission.
+ * @KVM_PGTABLE_PROT_PX: Privileged execute permission.
+ * @KVM_PGTABLE_PROT_X: Privileged and unprivileged execute permission.
* @KVM_PGTABLE_PROT_W: Write permission.
* @KVM_PGTABLE_PROT_R: Read permission.
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
@@ -246,12 +270,15 @@ enum kvm_pgtable_stage2_flags {
* @KVM_PGTABLE_PROT_SW3: Software bit 3.
*/
enum kvm_pgtable_prot {
- KVM_PGTABLE_PROT_X = BIT(0),
- KVM_PGTABLE_PROT_W = BIT(1),
- KVM_PGTABLE_PROT_R = BIT(2),
+ KVM_PGTABLE_PROT_PX = BIT(0),
+ KVM_PGTABLE_PROT_UX = BIT(1),
+ KVM_PGTABLE_PROT_X = KVM_PGTABLE_PROT_PX |
+ KVM_PGTABLE_PROT_UX,
+ KVM_PGTABLE_PROT_W = BIT(2),
+ KVM_PGTABLE_PROT_R = BIT(3),
- KVM_PGTABLE_PROT_DEVICE = BIT(3),
- KVM_PGTABLE_PROT_NORMAL_NC = BIT(4),
+ KVM_PGTABLE_PROT_DEVICE = BIT(4),
+ KVM_PGTABLE_PROT_NORMAL_NC = BIT(5),
KVM_PGTABLE_PROT_SW0 = BIT(55),
KVM_PGTABLE_PROT_SW1 = BIT(56),
@@ -283,8 +310,8 @@ typedef bool (*kvm_pgtable_force_pte_cb_t)(u64 addr, u64 end,
* children.
* @KVM_PGTABLE_WALK_SHARED: Indicates the page-tables may be shared
* with other software walkers.
- * @KVM_PGTABLE_WALK_HANDLE_FAULT: Indicates the page-table walk was
- * invoked from a fault handler.
+ * @KVM_PGTABLE_WALK_IGNORE_EAGAIN: Don't terminate the walk early if
+ * the walker returns -EAGAIN.
* @KVM_PGTABLE_WALK_SKIP_BBM_TLBI: Visit and update table entries
* without Break-before-make's
* TLB invalidation.
@@ -297,7 +324,7 @@ enum kvm_pgtable_walk_flags {
KVM_PGTABLE_WALK_TABLE_PRE = BIT(1),
KVM_PGTABLE_WALK_TABLE_POST = BIT(2),
KVM_PGTABLE_WALK_SHARED = BIT(3),
- KVM_PGTABLE_WALK_HANDLE_FAULT = BIT(4),
+ KVM_PGTABLE_WALK_IGNORE_EAGAIN = BIT(4),
KVM_PGTABLE_WALK_SKIP_BBM_TLBI = BIT(5),
KVM_PGTABLE_WALK_SKIP_CMO = BIT(6),
};
@@ -350,6 +377,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
return pteref;
}
+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+ return pteref;
+}
+
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
/*
@@ -379,6 +411,11 @@ static inline kvm_pte_t *kvm_dereference_pteref(struct kvm_pgtable_walker *walke
return rcu_dereference_check(pteref, !(walker->flags & KVM_PGTABLE_WALK_SHARED));
}
+static inline kvm_pte_t *kvm_dereference_pteref_raw(kvm_pteref_t pteref)
+{
+ return rcu_dereference_raw(pteref);
+}
+
static inline int kvm_pgtable_walk_begin(struct kvm_pgtable_walker *walker)
{
if (walker->flags & KVM_PGTABLE_WALK_SHARED)
@@ -412,15 +449,20 @@ static inline bool kvm_pgtable_walk_lock_held(void)
* be used instead of block mappings.
*/
struct kvm_pgtable {
- u32 ia_bits;
- s8 start_level;
- kvm_pteref_t pgd;
- struct kvm_pgtable_mm_ops *mm_ops;
-
- /* Stage-2 only */
- struct kvm_s2_mmu *mmu;
- enum kvm_pgtable_stage2_flags flags;
- kvm_pgtable_force_pte_cb_t force_pte_cb;
+ union {
+ struct rb_root_cached pkvm_mappings;
+ struct {
+ u32 ia_bits;
+ s8 start_level;
+ kvm_pteref_t pgd;
+ struct kvm_pgtable_mm_ops *mm_ops;
+
+ /* Stage-2 only */
+ enum kvm_pgtable_stage2_flags flags;
+ kvm_pgtable_force_pte_cb_t force_pte_cb;
+ };
+ };
+ struct kvm_s2_mmu *mmu;
};
/**
@@ -526,8 +568,11 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb);
-#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
- __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
+static inline int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
+{
+ return __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL);
+}
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
@@ -539,6 +584,26 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
+ * kvm_pgtable_stage2_destroy_range() - Destroy the unlinked range of addresses.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr: Intermediate physical address at which to place the mapping.
+ * @size: Size of the mapping.
+ *
+ * The page-table is assumed to be unreachable by any hardware walkers prior
+ * to freeing and therefore no TLB invalidation is performed.
+ */
+void kvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+ u64 addr, u64 size);
+
+/**
+ * kvm_pgtable_stage2_destroy_pgd() - Destroy the PGD of guest stage-2 page-table.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ *
+ * It is assumed that the rest of the page-table is freed before this operation.
+ */
+void kvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+
+/**
* kvm_pgtable_stage2_free_unlinked() - Free an unlinked stage-2 paging structure.
* @mm_ops: Memory management callbacks.
* @pgtable: Unlinked stage-2 paging structure to be freed.
@@ -609,14 +674,18 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
void *mc, enum kvm_pgtable_walk_flags flags);
/**
- * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
- * track ownership.
+ * kvm_pgtable_stage2_annotate() - Unmap and annotate pages in the IPA space
+ * to track ownership (and more).
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Base intermediate physical address to annotate.
* @size: Size of the annotated range.
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
* page-table pages.
- * @owner_id: Unique identifier for the owner of the page.
+ * @type: The type of the annotation, determining its meaning and format.
+ * @annotation: A 59-bit value that will be stored in the page tables.
+ * @annotation[0] and @annotation[63:60] must be 0.
+ * @annotation[59:1] is stored in the page tables, along
+ * with @type.
*
* By default, all page-tables are owned by identifier 0. This function can be
* used to mark portions of the IPA space as owned by other entities. When a
@@ -625,8 +694,9 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
*
* Return: 0 on success, negative error code on failure.
*/
-int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
- void *mc, u8 owner_id);
+int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ void *mc, enum kvm_invalid_pte_type type,
+ kvm_pte_t annotation);
/**
* kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
@@ -669,13 +739,15 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
* kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
+ * @flags: Flags to control the page-table walk (ex. a shared walk)
*
* The offset of @addr within a page is ignored.
*
* If there is a valid, leaf page-table entry used to translate @addr, then
* set the access flag in that entry.
*/
-void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
+void kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
@@ -705,6 +777,7 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
* @prot: Additional permissions to grant for the mapping.
+ * @flags: Flags to control the page-table walk (ex. a shared walk)
*
* The offset of @addr within a page is ignored.
*
@@ -717,7 +790,8 @@ bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
* Return: 0 on success, negative error code on failure.
*/
int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
- enum kvm_pgtable_prot prot);
+ enum kvm_pgtable_prot prot,
+ enum kvm_pgtable_walk_flags flags);
/**
* kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
index cd56acd9a842..2954b311128c 100644
--- a/arch/arm64/include/asm/kvm_pkvm.h
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -9,6 +9,7 @@
#include <linux/arm_ffa.h>
#include <linux/memblock.h>
#include <linux/scatterlist.h>
+#include <asm/kvm_host.h>
#include <asm/kvm_pgtable.h>
/* Maximum number of VMs that can co-exist under pKVM. */
@@ -16,9 +17,57 @@
#define HYP_MEMBLOCK_REGIONS 128
-int pkvm_init_host_vm(struct kvm *kvm);
+int pkvm_init_host_vm(struct kvm *kvm, unsigned long type);
int pkvm_create_hyp_vm(struct kvm *kvm);
+bool pkvm_hyp_vm_is_created(struct kvm *kvm);
void pkvm_destroy_hyp_vm(struct kvm *kvm);
+int pkvm_create_hyp_vcpu(struct kvm_vcpu *vcpu);
+
+/*
+ * Check whether the specified capability is allowed in pKVM.
+ *
+ * Certain features are allowed only for non-protected VMs in pKVM, which is why
+ * this takes the VM (kvm) as a parameter.
+ */
+static inline bool kvm_pkvm_ext_allowed(struct kvm *kvm, long ext)
+{
+ switch (ext) {
+ case KVM_CAP_IRQCHIP:
+ case KVM_CAP_ARM_PSCI:
+ case KVM_CAP_ARM_PSCI_0_2:
+ case KVM_CAP_NR_VCPUS:
+ case KVM_CAP_MAX_VCPUS:
+ case KVM_CAP_MAX_VCPU_ID:
+ case KVM_CAP_MSI_DEVID:
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ case KVM_CAP_ARM_PTRAUTH_ADDRESS:
+ case KVM_CAP_ARM_PTRAUTH_GENERIC:
+ return true;
+ case KVM_CAP_ARM_MTE:
+ return false;
+ default:
+ return !kvm || !kvm_vm_is_protected(kvm);
+ }
+}
+
+/*
+ * Check whether the KVM VM IOCTL is allowed in pKVM.
+ *
+ * Certain features are allowed only for non-protected VMs in pKVM, which is why
+ * this takes the VM (kvm) as a parameter.
+ */
+static inline bool kvm_pkvm_ioctl_allowed(struct kvm *kvm, unsigned int ioctl)
+{
+ long ext;
+ int r;
+
+ r = kvm_get_cap_for_kvm_ioctl(ioctl, &ext);
+
+ if (WARN_ON_ONCE(r < 0))
+ return false;
+
+ return kvm_pkvm_ext_allowed(kvm, ext);
+}
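A sketch of how a VM ioctl path might consult the helper (the error-code convention is an assumption):

static inline int example_check_vm_ioctl(struct kvm *kvm, unsigned int ioctl)
{
	if (is_protected_kvm_enabled() && !kvm_pkvm_ioctl_allowed(kvm, ioctl))
		return -EPERM;

	return 0;
}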
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
@@ -109,6 +158,12 @@ static inline unsigned long host_s2_pgtable_pages(void)
return res;
}
+#ifdef CONFIG_NVHE_EL2_DEBUG
+static inline unsigned long pkvm_selftest_pages(void) { return 32; }
+#else
+static inline unsigned long pkvm_selftest_pages(void) { return 0; }
+#endif
+
#define KVM_FFA_MBOX_NR_PAGES 1
static inline unsigned long hyp_ffa_proxy_pages(void)
@@ -137,4 +192,34 @@ static inline size_t pkvm_host_sve_state_size(void)
SVE_SIG_REGS_SIZE(sve_vq_from_vl(kvm_host_sve_max_vl)));
}
+struct pkvm_mapping {
+ struct rb_node node;
+ u64 gfn;
+ u64 pfn;
+ u64 nr_pages;
+ u64 __subtree_last; /* Internal member for interval tree */
+};
+
+int pkvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops);
+void pkvm_pgtable_stage2_destroy_range(struct kvm_pgtable *pgt,
+ u64 addr, u64 size);
+void pkvm_pgtable_stage2_destroy_pgd(struct kvm_pgtable *pgt);
+int pkvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
+ enum kvm_pgtable_prot prot, void *mc,
+ enum kvm_pgtable_walk_flags flags);
+int pkvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
+int pkvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
+int pkvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
+bool pkvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr, u64 size, bool mkold);
+int pkvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, enum kvm_pgtable_prot prot,
+ enum kvm_pgtable_walk_flags flags);
+void pkvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_walk_flags flags);
+int pkvm_pgtable_stage2_split(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct kvm_mmu_memory_cache *mc);
+void pkvm_pgtable_stage2_free_unlinked(struct kvm_pgtable_mm_ops *mm_ops, void *pgtable, s8 level);
+kvm_pte_t *pkvm_pgtable_stage2_create_unlinked(struct kvm_pgtable *pgt, u64 phys, s8 level,
+ enum kvm_pgtable_prot prot, void *mc,
+ bool force_pte);
#endif /* __ARM64_KVM_PKVM_H__ */
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h
index 6199c9f7ec6e..e50987b32483 100644
--- a/arch/arm64/include/asm/kvm_ptrauth.h
+++ b/arch/arm64/include/asm/kvm_ptrauth.h
@@ -8,7 +8,7 @@
#ifndef __ASM_KVM_PTRAUTH_H
#define __ASM_KVM_PTRAUTH_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/sysreg.h>
@@ -100,7 +100,7 @@ alternative_else_nop_endif
.endm
#endif /* CONFIG_ARM64_PTR_AUTH */
-#else /* !__ASSEMBLY */
+#else /* !__ASSEMBLER__ */
#define __ptrauth_save_key(ctxt, key) \
do { \
@@ -120,5 +120,5 @@ alternative_else_nop_endif
__ptrauth_save_key(ctxt, APGA); \
} while(0)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_KVM_PTRAUTH_H */
diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h
deleted file mode 100644
index 87e10d9a635b..000000000000
--- a/arch/arm64/include/asm/kvm_ras.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 - Arm Ltd */
-
-#ifndef __ARM64_KVM_RAS_H__
-#define __ARM64_KVM_RAS_H__
-
-#include <linux/acpi.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-
-#include <asm/acpi.h>
-
-/*
- * Was this synchronous external abort a RAS notification?
- * Returns '0' for errors handled by some RAS subsystem, or -ENOENT.
- */
-static inline int kvm_handle_guest_sea(phys_addr_t addr, u64 esr)
-{
- /* apei_claim_sea(NULL) expects to mask interrupts itself */
- lockdep_assert_irqs_enabled();
-
- return apei_claim_sea(NULL);
-}
-
-#endif /* __ARM64_KVM_RAS_H__ */
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index d3acd9c87509..40bd17add539 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -1,7 +1,7 @@
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/assembler.h>
#endif
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index 3129a5819d0e..1e77c45bb0a8 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -4,8 +4,6 @@
#include <asm/atomic_ll_sc.h>
-#ifdef CONFIG_ARM64_LSE_ATOMICS
-
#define __LSE_PREAMBLE ".arch_extension lse\n"
#include <linux/compiler_types.h>
@@ -27,11 +25,4 @@
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
-#else /* CONFIG_ARM64_LSE_ATOMICS */
-
-#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__)
-
-#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
-
-#endif /* CONFIG_ARM64_LSE_ATOMICS */
#endif /* __ASM_LSE_H */
diff --git a/arch/arm64/include/asm/lsui.h b/arch/arm64/include/asm/lsui.h
new file mode 100644
index 000000000000..8f0d81953eb6
--- /dev/null
+++ b/arch/arm64/include/asm/lsui.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_LSUI_H
+#define __ASM_LSUI_H
+
+#include <linux/compiler_types.h>
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/alternative-macros.h>
+#include <asm/cpucaps.h>
+
+#define __LSUI_PREAMBLE ".arch_extension lsui\n"
+
+#ifdef CONFIG_ARM64_LSUI
+
+#define __lsui_llsc_body(op, ...) \
+({ \
+ alternative_has_cap_unlikely(ARM64_HAS_LSUI) ? \
+ __lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__); \
+})
+
+#else /* CONFIG_ARM64_LSUI */
+
+#define __lsui_llsc_body(op, ...) __llsc_##op(__VA_ARGS__)
+
+#endif /* CONFIG_ARM64_LSUI */
+
+#endif /* __ASM_LSUI_H */
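The pattern mirrors the LSE/LL-SC split: each unprivileged-access operation is expected to provide an __lsui_*() and an __llsc_*() body plus a one-line dispatcher. A hedged sketch of such a wrapper (set_user_word is a made-up operation name):

/* Assumes __lsui_set_user_word()/__llsc_set_user_word() are defined nearby. */
#define set_user_word(...)	__lsui_llsc_body(set_user_word, __VA_ARGS__)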
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index f8f78f622dd2..314b2b52025f 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -4,6 +4,8 @@
#include <asm/rsi.h>
+struct device;
+
struct arm64_mem_crypt_ops {
int (*encrypt)(unsigned long addr, int numpages);
int (*decrypt)(unsigned long addr, int numpages);
@@ -21,4 +23,15 @@ static inline bool force_dma_unencrypted(struct device *dev)
return is_realm_world();
}
+/*
+ * For Arm CCA guests, canonical addresses are "encrypted", so no changes
+ * required for dma_addr_encrypted().
+ * The unencrypted DMA buffers must be accessed via the unprotected IPA,
+ * "top IPA bit" set.
+ */
+#define dma_addr_unencrypted(x) ((x) | PROT_NS_SHARED)
+
+/* Clear the "top" IPA bit while converting back */
+#define dma_addr_canonical(x) ((x) & ~PROT_NS_SHARED)
+
#endif /* __ASM_MEM_ENCRYPT_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 8b9f33cf561b..a2b7a33966ff 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -118,7 +118,7 @@
* VMAP'd stacks are allocated at page granularity, so we must ensure that such
* stacks are a multiple of page size.
*/
-#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
+#if (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT PAGE_SHIFT
#else
#define THREAD_SHIFT MIN_THREAD_SHIFT
@@ -135,23 +135,22 @@
* checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
* assembly.
*/
-#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN (2 * THREAD_SIZE)
-#else
-#define THREAD_ALIGN THREAD_SIZE
-#endif
#define IRQ_STACK_SIZE THREAD_SIZE
#define OVERFLOW_STACK_SIZE SZ_4K
+#define NVHE_STACK_SHIFT PAGE_SHIFT
+#define NVHE_STACK_SIZE (UL(1) << NVHE_STACK_SHIFT)
+
/*
* With the minimum frame size of [x29, x30], exactly half the combined
* sizes of the hyp and overflow stacks is the maximum size needed to
* save the unwinded stacktrace; plus an additional entry to delimit the
* end.
*/
-#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE ((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
/*
* Alignment of kernel segments (e.g. .text, .data).
@@ -176,19 +175,24 @@
#define MT_DEVICE_nGnRE 4
/*
- * Memory types for Stage-2 translation
+ * Memory types for Stage-2 translation when HCR_EL2.FWB=0. See R_HMNDG,
+ * R_TNHFM, R_GQFSF and I_MCQKW for the details on how these attributes get
+ * combined with Stage-1.
*/
#define MT_S2_NORMAL 0xf
#define MT_S2_NORMAL_NC 0x5
#define MT_S2_DEVICE_nGnRE 0x1
+#define MT_S2_AS_S1 MT_S2_NORMAL
/*
- * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
- * Stage-2 enforces Normal-WB and Device-nGnRE
+ * Memory types for Stage-2 translation when HCR_EL2.FWB=1. Stage-2 enforces
+ * Normal-WB and Device-nGnRE, unless we actively say that S1 wins. See
+ * R_VRJSW and R_RHWZM for details.
*/
#define MT_S2_FWB_NORMAL 6
#define MT_S2_FWB_NORMAL_NC 5
#define MT_S2_FWB_DEVICE_nGnRE 1
+#define MT_S2_FWB_AS_S1 7
#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER (PUD_SHIFT)
@@ -208,7 +212,7 @@
*/
#define TRAMP_SWAPPER_OFFSET (2 * PAGE_SIZE)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitops.h>
#include <linux/compiler.h>
@@ -309,6 +313,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
#define arch_enable_tag_checks_sync() mte_enable_kernel_sync()
#define arch_enable_tag_checks_async() mte_enable_kernel_async()
#define arch_enable_tag_checks_asymm() mte_enable_kernel_asymm()
+#define arch_enable_tag_checks_write_only() mte_enable_kernel_store_only()
#define arch_suppress_tag_checks_start() mte_enable_tco()
#define arch_suppress_tag_checks_stop() mte_disable_tco()
#define arch_force_async_tag_fault() mte_check_tfsr_exit()
@@ -392,7 +397,6 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
* virt_to_page(x) convert a _valid_ virtual address to struct page *
* virt_addr_valid(x) indicates whether a virtual address is valid
*/
-#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#if defined(CONFIG_DEBUG_VIRTUAL)
#define page_to_virt(x) ({ \
@@ -422,7 +426,7 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
})
void dump_mem_limit(void);
-#endif /* !ASSEMBLY */
+#endif /* !__ASSEMBLER__ */
/*
* Given that the GIC architecture permits ITS implementations that can only be
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 1d53022fc7e1..8770c7ee759f 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -7,13 +7,14 @@
#ifndef BUILD_VDSO
#include <linux/compiler.h>
#include <linux/fs.h>
+#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/types.h>
-static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+static inline vm_flags_t arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
- unsigned long ret = 0;
+ vm_flags_t ret = 0;
if (system_supports_bti() && (prot & PROT_BTI))
ret |= VM_ARM64_BTI;
@@ -33,8 +34,8 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
-static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
- unsigned long flags)
+static inline vm_flags_t arch_calc_vm_flag_bits(struct file *file,
+ unsigned long flags)
{
/*
* Only allow MTE on anonymous mappings as these are guaranteed to be
@@ -44,7 +45,7 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
if (system_supports_mte()) {
if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
return VM_MTE_ALLOWED;
- if (shmem_file(file))
+ if (shmem_file(file) || is_file_hugepages(file))
return VM_MTE_ALLOWED;
}
@@ -67,7 +68,7 @@ static inline bool arch_validate_prot(unsigned long prot,
}
#define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
-static inline bool arch_validate_flags(unsigned long vm_flags)
+static inline bool arch_validate_flags(vm_flags_t vm_flags)
{
if (system_supports_mte()) {
/*
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 2ec96d91acc6..5e1211c540ab 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -10,9 +10,8 @@
#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
#define USER_ASID_BIT 48
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
-#define TTBR_ASID_MASK (UL(0xffff) << 48)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/refcount.h>
#include <asm/cpufeature.h>
@@ -71,6 +70,8 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
pgprot_t prot, bool page_mappings_only);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
extern void mark_linear_text_alias_ro(void);
+extern int split_kernel_leaf_mapping(unsigned long start, unsigned long end);
+extern void linear_map_maybe_split_to_ptes(void);
/*
* This check is triggered during the early boot before the cpufeature
@@ -94,23 +95,16 @@ static inline bool kaslr_requires_kpti(void)
return false;
}
- /*
- * Systems affected by Cavium erratum 24756 are incompatible
- * with KPTI.
- */
- if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
- extern const struct midr_range cavium_erratum_27456_cpus[];
-
- if (is_midr_in_range_list(read_cpuid_id(),
- cavium_erratum_27456_cpus))
- return false;
- }
-
return true;
}
-#define INIT_MM_CONTEXT(name) \
- .pgd = swapper_pg_dir,
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+void kpti_install_ng_mappings(void);
+#else
+static inline void kpti_install_ng_mappings(void) {}
+#endif
+
+extern bool page_alloc_available;
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 48b3d9553b67..803b68758152 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -8,7 +8,7 @@
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <linux/sched.h>
@@ -62,29 +62,21 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
}
/*
- * TCR.T0SZ value to use when the ID map is active.
- */
-#define idmap_t0sz TCR_T0SZ(IDMAP_VA_BITS)
-
-/*
* Ensure TCR.T0SZ is set to the provided value.
*/
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
unsigned long tcr = read_sysreg(tcr_el1);
- if ((tcr & TCR_T0SZ_MASK) == t0sz)
+ if ((tcr & TCR_EL1_T0SZ_MASK) == t0sz)
return;
- tcr &= ~TCR_T0SZ_MASK;
+ tcr &= ~TCR_EL1_T0SZ_MASK;
tcr |= t0sz;
write_sysreg(tcr, tcr_el1);
isb();
}
-#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
-#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
-
/*
* Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
*
@@ -103,7 +95,7 @@ static inline void cpu_uninstall_idmap(void)
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- cpu_set_default_tcr_t0sz();
+ __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual));
if (mm != &init_mm && !system_uses_ttbr0_pan())
cpu_switch_mm(mm->pgd, mm);
@@ -113,7 +105,7 @@ static inline void cpu_install_idmap(void)
{
cpu_set_reserved_ttbr0();
local_flush_tlb_all();
- cpu_set_idmap_tcr_t0sz();
+ __cpu_set_tcr_t0sz(TCR_T0SZ(IDMAP_VA_BITS));
cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}
@@ -218,7 +210,8 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
if (mm == &init_mm)
ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
else
- ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
+ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) |
+ FIELD_PREP(TTBRx_EL1_ASID_MASK, ASID(mm));
WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
@@ -271,18 +264,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
static inline const struct cpumask *
-task_cpu_possible_mask(struct task_struct *p)
+__task_cpu_possible_mask(struct task_struct *p, const struct cpumask *mask)
{
if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
- return cpu_possible_mask;
+ return mask;
if (!is_compat_thread(task_thread_info(p)))
- return cpu_possible_mask;
+ return mask;
return system_32bit_el0_cpumask();
}
+
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+ return __task_cpu_possible_mask(p, cpu_possible_mask);
+}
#define task_cpu_possible_mask task_cpu_possible_mask
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
+
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);
@@ -322,6 +323,6 @@ static inline void deactivate_mm(struct task_struct *tsk,
#include <asm-generic/mmu_context.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* !__ASM_MMU_CONTEXT_H */
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 79550b22ba19..fb9b88eebeb1 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -19,6 +19,7 @@ struct mod_arch_specific {
/* for CONFIG_DYNAMIC_FTRACE */
struct plt_entry *ftrace_trampolines;
+ struct plt_entry *init_ftrace_trampolines;
};
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index b9ae8349e35d..fb944b46846d 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -2,6 +2,7 @@ SECTIONS {
.plt 0 : { BYTE(0) }
.init.plt 0 : { BYTE(0) }
.text.ftrace_trampoline 0 : { BYTE(0) }
+ .init.text.ftrace_trampoline 0 : { BYTE(0) }
#ifdef CONFIG_KASAN_SW_TAGS
/*
diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h
new file mode 100644
index 000000000000..70d396e7b6da
--- /dev/null
+++ b/arch/arm64/include/asm/mpam.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2025 Arm Ltd. */
+
+#ifndef __ASM__MPAM_H
+#define __ASM__MPAM_H
+
+#include <linux/arm_mpam.h>
+#include <linux/bitfield.h>
+#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/sched.h>
+
+#include <asm/sysreg.h>
+
+DECLARE_STATIC_KEY_FALSE(mpam_enabled);
+DECLARE_PER_CPU(u64, arm64_mpam_default);
+DECLARE_PER_CPU(u64, arm64_mpam_current);
+
+/*
+ * The value of the MPAM0_EL1 sysreg when a task is in resctrl's default group.
+ * This is used by the context switch code to use the resctrl CPU property
+ * instead. The value is modified when CDP is enabled/disabled by mounting
+ * the resctrl filesystem.
+ */
+extern u64 arm64_mpam_global_default;
+
+#ifdef CONFIG_ARM64_MPAM
+static inline u64 __mpam_regval(u16 partid_d, u16 partid_i, u8 pmg_d, u8 pmg_i)
+{
+ return FIELD_PREP(MPAM0_EL1_PARTID_D, partid_d) |
+ FIELD_PREP(MPAM0_EL1_PARTID_I, partid_i) |
+ FIELD_PREP(MPAM0_EL1_PMG_D, pmg_d) |
+ FIELD_PREP(MPAM0_EL1_PMG_I, pmg_i);
+}
+
+static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i,
+ u8 pmg_d, u8 pmg_i)
+{
+ u64 default_val = __mpam_regval(partid_d, partid_i, pmg_d, pmg_i);
+
+ WRITE_ONCE(per_cpu(arm64_mpam_default, cpu), default_val);
+}
+
+/*
+ * The resctrl filesystem writes to the partid/pmg values for threads and CPUs,
+ * which may race with reads in mpam_thread_switch(). Ensure only one of the old
+ * or new values is used. Particular care should be taken with the pmg field as
+ * mpam_thread_switch() may read a partid and pmg that don't match, causing this
+ * value to be stored with cache allocations, despite being considered 'free' by
+ * resctrl.
+ */
+static inline u64 mpam_get_regval(struct task_struct *tsk)
+{
+ return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg);
+}
+
+static inline void mpam_set_task_partid_pmg(struct task_struct *tsk,
+ u16 partid_d, u16 partid_i,
+ u8 pmg_d, u8 pmg_i)
+{
+ u64 regval = __mpam_regval(partid_d, partid_i, pmg_d, pmg_i);
+
+ WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval);
+}
+
+static inline void mpam_thread_switch(struct task_struct *tsk)
+{
+ u64 oldregval;
+ int cpu = smp_processor_id();
+ u64 regval = mpam_get_regval(tsk);
+
+ if (!static_branch_likely(&mpam_enabled))
+ return;
+
+ if (regval == READ_ONCE(arm64_mpam_global_default))
+ regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu));
+
+ oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu));
+ if (oldregval == regval)
+ return;
+
+ write_sysreg_s(regval | MPAM1_EL1_MPAMEN, SYS_MPAM1_EL1);
+ if (system_supports_sme())
+ write_sysreg_s(regval & (MPAMSM_EL1_PARTID_D | MPAMSM_EL1_PMG_D), SYS_MPAMSM_EL1);
+ isb();
+
+ /* Synchronising the EL0 write is left until the ERET to EL0 */
+ write_sysreg_s(regval, SYS_MPAM0_EL1);
+
+ WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval);
+}
+#else
+static inline void mpam_thread_switch(struct task_struct *tsk) {}
+#endif /* CONFIG_ARM64_MPAM */
+
+#endif /* __ASM__MPAM_H */
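For reference, the register value built by __mpam_regval() and consumed by mpam_thread_switch() can be reproduced outside the kernel. A minimal user-space sketch, assuming the architectural MPAM0_EL1 layout (PARTID_I in bits [15:0], PARTID_D in [31:16], PMG_I in [39:32], PMG_D in [47:40]) in place of the generated FIELD_PREP() masks:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions; the kernel uses the generated sysreg masks. */
#define PARTID_I_SHIFT	0
#define PARTID_D_SHIFT	16
#define PMG_I_SHIFT	32
#define PMG_D_SHIFT	40

static uint64_t mpam_regval(uint16_t partid_d, uint16_t partid_i,
			    uint8_t pmg_d, uint8_t pmg_i)
{
	return ((uint64_t)partid_d << PARTID_D_SHIFT) |
	       ((uint64_t)partid_i << PARTID_I_SHIFT) |
	       ((uint64_t)pmg_d << PMG_D_SHIFT) |
	       ((uint64_t)pmg_i << PMG_I_SHIFT);
}

int main(void)
{
	/* PARTID 5 for data and instruction accesses, PMG 1 for both. */
	printf("MPAM0_EL1 = %#llx\n",
	       (unsigned long long)mpam_regval(5, 5, 1, 1));
	return 0;
}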
diff --git a/arch/arm64/include/asm/mshyperv.h b/arch/arm64/include/asm/mshyperv.h
index a975e1a689dd..b721d3134ab6 100644
--- a/arch/arm64/include/asm/mshyperv.h
+++ b/arch/arm64/include/asm/mshyperv.h
@@ -6,9 +6,8 @@
* the ARM64 architecture. See include/asm-generic/mshyperv.h for
* definitions that are architecture independent.
*
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
*
* Copyright (C) 2021, Microsoft, Inc.
*
@@ -20,7 +19,7 @@
#include <linux/types.h>
#include <linux/arm-smccc.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
/*
* Declare calls to get and set Hyper-V VP register values on ARM64, which
@@ -41,6 +40,19 @@ static inline u64 hv_get_msr(unsigned int reg)
return hv_get_vpreg(reg);
}
+/*
+ * Nested is not supported on arm64
+ */
+static inline void hv_set_non_nested_msr(unsigned int reg, u64 value)
+{
+ hv_set_msr(reg, value);
+}
+
+static inline u64 hv_get_non_nested_msr(unsigned int reg)
+{
+ return hv_get_msr(reg);
+}
+
/* SMCCC hypercall parameters */
#define HV_SMCCC_FUNC_NUMBER 1
#define HV_FUNC_ID ARM_SMCCC_CALL_VAL( \
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 2e98028c1965..352139271918 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -9,7 +9,7 @@
#include <asm/cputype.h>
#include <asm/mte-def.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -200,6 +200,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);
void mte_enable_kernel_asymm(void);
+int mte_enable_kernel_store_only(void);
#else /* CONFIG_ARM64_MTE */
@@ -251,8 +252,13 @@ static inline void mte_enable_kernel_asymm(void)
{
}
+static inline int mte_enable_kernel_store_only(void)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_ARM64_MTE */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_MTE_KASAN_H */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 6567df8ec8ca..7f7b97e09996 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -8,7 +8,7 @@
#include <asm/compiler.h>
#include <asm/mte-def.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitfield.h>
#include <linux/kasan-enabled.h>
@@ -48,12 +48,12 @@ static inline void set_page_mte_tagged(struct page *page)
* before the page flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &page->flags);
+ set_bit(PG_mte_tagged, &page->flags.f);
}
static inline bool page_mte_tagged(struct page *page)
{
- bool ret = test_bit(PG_mte_tagged, &page->flags);
+ bool ret = test_bit(PG_mte_tagged, &page->flags.f);
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
@@ -82,7 +82,7 @@ static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));
- if (!test_and_set_bit(PG_mte_lock, &page->flags))
+ if (!test_and_set_bit(PG_mte_lock, &page->flags.f))
return true;
/*
@@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&page->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -173,13 +173,13 @@ static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
* before the folio flags update.
*/
smp_wmb();
- set_bit(PG_mte_tagged, &folio->flags);
+ set_bit(PG_mte_tagged, &folio->flags.f);
}
static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
- bool ret = test_bit(PG_mte_tagged, &folio->flags);
+ bool ret = test_bit(PG_mte_tagged, &folio->flags.f);
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
@@ -196,7 +196,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));
- if (!test_and_set_bit(PG_mte_lock, &folio->flags))
+ if (!test_and_set_bit(PG_mte_lock, &folio->flags.f))
return true;
/*
@@ -204,7 +204,7 @@ static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
- smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));
+ smp_cond_load_acquire(&folio->flags.f, VAL & (1UL << PG_mte_tagged));
return false;
}
@@ -252,6 +252,9 @@ static inline void mte_check_tfsr_entry(void)
if (!kasan_hw_tags_enabled())
return;
+ if (!system_uses_mte_async_or_asymm_mode())
+ return;
+
mte_check_tfsr_el1();
}
@@ -260,6 +263,9 @@ static inline void mte_check_tfsr_exit(void)
if (!kasan_hw_tags_enabled())
return;
+ if (!system_uses_mte_async_or_asymm_mode())
+ return;
+
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
@@ -282,5 +288,5 @@ static inline void mte_check_tfsr_exit(void)
}
#endif /* CONFIG_KASAN_HW_TAGS */
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_MTE_H */
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
index d4b1d172a79b..acebee4605b5 100644
--- a/arch/arm64/include/asm/neon.h
+++ b/arch/arm64/include/asm/neon.h
@@ -13,7 +13,7 @@
#define cpu_has_neon() system_supports_fpsimd()
-void kernel_neon_begin(void);
-void kernel_neon_end(void);
+void kernel_neon_begin(struct user_fpsimd_state *);
+void kernel_neon_end(struct user_fpsimd_state *);
#endif /* ! __ASM_NEON_H */
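With this prototype change a caller now passes the storage that is presumably used to preserve the task's FP/SIMD state across the kernel-mode NEON section; the old void prototypes kept that state in per-task storage. A hedged sketch of the expected calling pattern (kernel context assumed, error paths omitted):

#include <asm/fpsimd.h>
#include <asm/neon.h>

static void do_simd_work(void)
{
	struct user_fpsimd_state saved;	/* caller-supplied save area */

	kernel_neon_begin(&saved);
	/* ... NEON/FP-SIMD instructions may be used here ... */
	kernel_neon_end(&saved);
}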
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 2312e6ee595f..e25d0d18f6d7 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -10,7 +10,7 @@
#include <asm/page-def.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
#include <linux/types.h> /* for gfp_t */
@@ -33,10 +33,9 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
unsigned long vaddr);
#define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
-void tag_clear_highpage(struct page *to);
-#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+bool tag_clear_highpages(struct page *to, int numpages);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
-#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t;
@@ -45,9 +44,14 @@ int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
-#define VM_DATA_DEFAULT_FLAGS (VM_DATA_FLAGS_TSK_EXEC | VM_MTE_ALLOWED)
+#ifdef CONFIG_ARM64_MTE
+#define VMA_DATA_DEFAULT_FLAGS append_vma_flags(VMA_DATA_FLAGS_TSK_EXEC, \
+ VMA_MTE_ALLOWED_BIT)
+#else
+#define VMA_DATA_DEFAULT_FLAGS VMA_DATA_FLAGS_TSK_EXEC
+#endif
#include <asm-generic/getorder.h>
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index 9aa193e0e8f2..cb037e742372 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -3,20 +3,6 @@
#define _ASM_ARM64_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
-#include <linux/static_call_types.h>
-
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
-u64 dummy_steal_clock(int cpu);
-
-DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
-
-static inline u64 paravirt_steal_clock(int cpu)
-{
- return static_call(pv_steal_clock)(cpu);
-}
int __init pv_time_init(void);
diff --git a/arch/arm64/include/asm/paravirt_api_clock.h b/arch/arm64/include/asm/paravirt_api_clock.h
deleted file mode 100644
index 65ac7cee0dad..000000000000
--- a/arch/arm64/include/asm/paravirt_api_clock.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm/paravirt.h>
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 9abcc8ef3087..b57b2bb00967 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -77,7 +77,7 @@ __percpu_##name##_case_##sz(void *ptr, unsigned long val) \
" stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n" \
" cbnz %w[loop], 1b", \
/* LSE atomics */ \
- #op_lse "\t%" #w "[val], %[ptr]\n" \
+ #op_lse "\t%" #w "[val], %" #w "[tmp], %[ptr]\n" \
__nops(3)) \
: [loop] "=&r" (loop), [tmp] "=&r" (tmp), \
[ptr] "+Q"(*(u##sz *)ptr) \
@@ -124,9 +124,16 @@ PERCPU_RW_OPS(8)
PERCPU_RW_OPS(16)
PERCPU_RW_OPS(32)
PERCPU_RW_OPS(64)
-PERCPU_OP(add, add, stadd)
-PERCPU_OP(andnot, bic, stclr)
-PERCPU_OP(or, orr, stset)
+
+/*
+ * Use value-returning atomics for CPU-local ops as they are more likely
+ * to execute "near" to the CPU (e.g. in L1$).
+ *
+ * https://lore.kernel.org/r/e7d539ed-ced0-4b96-8ecd-048a5b803b85@paulmck-laptop
+ */
+PERCPU_OP(add, add, ldadd)
+PERCPU_OP(andnot, bic, ldclr)
+PERCPU_OP(or, orr, ldset)
PERCPU_RET_OP(add, add, ldadd)
#undef PERCPU_RW_OPS
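The extra %[tmp] operand in the template is what makes the value-returning forms possible: LDADD and friends need a destination register for the old value even when it is discarded. A rough, hand-written equivalent of the 64-bit LSE path, outside the alternatives framework and ignoring the per-CPU pointer and preemption handling:

static inline void percpu_add_64_lse(unsigned long *ptr, unsigned long val)
{
	unsigned long tmp;

	/*
	 * ldadd returns the previous memory value in tmp, which is simply
	 * ignored; the old store-only form was "stadd %[val], %[ptr]".
	 */
	asm volatile("ldadd	%[val], %[tmp], %[ptr]"
		     : [tmp] "=&r" (tmp), [ptr] "+Q" (*ptr)
		     : [val] "r" (val));
}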
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e75422864d1b..1b4509d3382c 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -85,24 +85,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp)
__pgd_populate(pgdp, __pa(p4dp), pgdval);
}
-static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
-}
-
-static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
-{
- if (!pgtable_l5_enabled())
- return;
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
-}
-
-#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
#else
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot)
{
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index c78a988cca93..72f31800c703 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -7,40 +7,46 @@
#include <asm/memory.h>
+#define PTDESC_ORDER 3
+
+/* Number of VA bits resolved by a single translation table level */
+#define PTDESC_TABLE_SHIFT (PAGE_SHIFT - PTDESC_ORDER)
+
/*
* Number of page-table levels required to address 'va_bits' wide
* address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
- * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
+ * bits with PTDESC_TABLE_SHIFT bits at each page table level. Hence:
*
- * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
+ * levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), PTDESC_TABLE_SHIFT)
*
* where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
*
* We cannot include linux/kernel.h which defines DIV_ROUND_UP here
* due to build issues. So we open code DIV_ROUND_UP here:
*
- * ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
+ * ((((va_bits) - PAGE_SHIFT) + PTDESC_TABLE_SHIFT - 1) / PTDESC_TABLE_SHIFT)
*
* which gets simplified as :
*/
-#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
+#define ARM64_HW_PGTABLE_LEVELS(va_bits) \
+ (((va_bits) - PTDESC_ORDER - 1) / PTDESC_TABLE_SHIFT)
/*
* Size mapped by an entry at level n ( -1 <= n <= 3)
- * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
+ * We map PTDESC_TABLE_SHIFT at all translation levels and PAGE_SHIFT bits
* in the final page. The maximum number of translation levels supported by
* the architecture is 5. Hence, starting at level n, we have further
* ((4 - n) - 1) levels of translation excluding the offset within the page.
* So, the total number of bits mapped by an entry at level n is :
*
- * ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
+ * ((4 - n) - 1) * PTDESC_TABLE_SHIFT + PAGE_SHIFT
*
* Rearranging it a bit we get :
- * (4 - n) * (PAGE_SHIFT - 3) + 3
+ * (4 - n) * PTDESC_TABLE_SHIFT + PTDESC_ORDER
*/
-#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) ((PAGE_SHIFT - 3) * (4 - (n)) + 3)
+#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) (PTDESC_TABLE_SHIFT * (4 - (n)) + PTDESC_ORDER)
-#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PTE (1 << PTDESC_TABLE_SHIFT)
/*
* PMD_SHIFT determines the size a level 2 page table entry can map.
@@ -49,7 +55,7 @@
#define PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PMD (1 << PTDESC_TABLE_SHIFT)
#endif
/*
@@ -59,14 +65,14 @@
#define PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
-#define PTRS_PER_PUD (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_PUD (1 << PTDESC_TABLE_SHIFT)
#endif
#if CONFIG_PGTABLE_LEVELS > 4
#define P4D_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(0)
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE-1))
-#define PTRS_PER_P4D (1 << (PAGE_SHIFT - 3))
+#define PTRS_PER_P4D (1 << PTDESC_TABLE_SHIFT)
#endif
/*
@@ -97,7 +103,6 @@
* Level -1 descriptor (PGD).
*/
#define PGD_TYPE_TABLE (_AT(pgdval_t, 3) << 0)
-#define PGD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
#define PGD_TYPE_MASK (_AT(pgdval_t, 3) << 0)
#define PGD_TABLE_AF (_AT(pgdval_t, 1) << 10) /* Ignored if no FEAT_HAFT */
#define PGD_TABLE_PXN (_AT(pgdval_t, 1) << 59)
@@ -107,7 +112,6 @@
* Level 0 descriptor (P4D).
*/
#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0)
-#define P4D_TABLE_BIT (_AT(p4dval_t, 1) << 1)
#define P4D_TYPE_MASK (_AT(p4dval_t, 3) << 0)
#define P4D_TYPE_SECT (_AT(p4dval_t, 1) << 0)
#define P4D_SECT_RDONLY (_AT(p4dval_t, 1) << 7) /* AP[2] */
@@ -119,7 +123,6 @@
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
-#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
@@ -133,7 +136,6 @@
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
-#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
#define PMD_TABLE_AF (_AT(pmdval_t, 1) << 10) /* Ignored if no FEAT_HAFT */
/*
@@ -162,7 +164,6 @@
#define PTE_VALID (_AT(pteval_t, 1) << 0)
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
-#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
@@ -223,112 +224,55 @@
#define S1_TABLE_AP (_AT(pmdval_t, 3) << 61)
/*
- * Highest possible physical address supported.
- */
-#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
-#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
-
-#define TTBR_CNP_BIT (UL(1) << 0)
-
-/*
* TCR flags.
*/
-#define TCR_T0SZ_OFFSET 0
-#define TCR_T1SZ_OFFSET 16
-#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET)
-#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
-#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
-#define TCR_TxSZ_WIDTH 6
-#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
-#define TCR_T1SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET)
-
-#define TCR_EPD0_SHIFT 7
-#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT)
-#define TCR_IRGN0_SHIFT 8
-#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
-#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
-#define TCR_IRGN0_WBWA (UL(1) << TCR_IRGN0_SHIFT)
-#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
-#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
-
-#define TCR_EPD1_SHIFT 23
-#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT)
-#define TCR_IRGN1_SHIFT 24
-#define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT)
-#define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT)
-#define TCR_IRGN1_WBWA (UL(1) << TCR_IRGN1_SHIFT)
-#define TCR_IRGN1_WT (UL(2) << TCR_IRGN1_SHIFT)
-#define TCR_IRGN1_WBnWA (UL(3) << TCR_IRGN1_SHIFT)
-
-#define TCR_IRGN_NC (TCR_IRGN0_NC | TCR_IRGN1_NC)
-#define TCR_IRGN_WBWA (TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
-#define TCR_IRGN_WT (TCR_IRGN0_WT | TCR_IRGN1_WT)
-#define TCR_IRGN_WBnWA (TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
-#define TCR_IRGN_MASK (TCR_IRGN0_MASK | TCR_IRGN1_MASK)
-
-
-#define TCR_ORGN0_SHIFT 10
-#define TCR_ORGN0_MASK (UL(3) << TCR_ORGN0_SHIFT)
-#define TCR_ORGN0_NC (UL(0) << TCR_ORGN0_SHIFT)
-#define TCR_ORGN0_WBWA (UL(1) << TCR_ORGN0_SHIFT)
-#define TCR_ORGN0_WT (UL(2) << TCR_ORGN0_SHIFT)
-#define TCR_ORGN0_WBnWA (UL(3) << TCR_ORGN0_SHIFT)
-
-#define TCR_ORGN1_SHIFT 26
-#define TCR_ORGN1_MASK (UL(3) << TCR_ORGN1_SHIFT)
-#define TCR_ORGN1_NC (UL(0) << TCR_ORGN1_SHIFT)
-#define TCR_ORGN1_WBWA (UL(1) << TCR_ORGN1_SHIFT)
-#define TCR_ORGN1_WT (UL(2) << TCR_ORGN1_SHIFT)
-#define TCR_ORGN1_WBnWA (UL(3) << TCR_ORGN1_SHIFT)
-
-#define TCR_ORGN_NC (TCR_ORGN0_NC | TCR_ORGN1_NC)
-#define TCR_ORGN_WBWA (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
-#define TCR_ORGN_WT (TCR_ORGN0_WT | TCR_ORGN1_WT)
-#define TCR_ORGN_WBnWA (TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
-#define TCR_ORGN_MASK (TCR_ORGN0_MASK | TCR_ORGN1_MASK)
-
-#define TCR_SH0_SHIFT 12
-#define TCR_SH0_MASK (UL(3) << TCR_SH0_SHIFT)
-#define TCR_SH0_INNER (UL(3) << TCR_SH0_SHIFT)
-
-#define TCR_SH1_SHIFT 28
-#define TCR_SH1_MASK (UL(3) << TCR_SH1_SHIFT)
-#define TCR_SH1_INNER (UL(3) << TCR_SH1_SHIFT)
-#define TCR_SHARED (TCR_SH0_INNER | TCR_SH1_INNER)
-
-#define TCR_TG0_SHIFT 14
-#define TCR_TG0_MASK (UL(3) << TCR_TG0_SHIFT)
-#define TCR_TG0_4K (UL(0) << TCR_TG0_SHIFT)
-#define TCR_TG0_64K (UL(1) << TCR_TG0_SHIFT)
-#define TCR_TG0_16K (UL(2) << TCR_TG0_SHIFT)
-
-#define TCR_TG1_SHIFT 30
-#define TCR_TG1_MASK (UL(3) << TCR_TG1_SHIFT)
-#define TCR_TG1_16K (UL(1) << TCR_TG1_SHIFT)
-#define TCR_TG1_4K (UL(2) << TCR_TG1_SHIFT)
-#define TCR_TG1_64K (UL(3) << TCR_TG1_SHIFT)
-
-#define TCR_IPS_SHIFT 32
-#define TCR_IPS_MASK (UL(7) << TCR_IPS_SHIFT)
-#define TCR_A1 (UL(1) << 22)
-#define TCR_ASID16 (UL(1) << 36)
-#define TCR_TBI0 (UL(1) << 37)
-#define TCR_TBI1 (UL(1) << 38)
-#define TCR_HA (UL(1) << 39)
-#define TCR_HD (UL(1) << 40)
-#define TCR_HPD0_SHIFT 41
-#define TCR_HPD0 (UL(1) << TCR_HPD0_SHIFT)
-#define TCR_HPD1_SHIFT 42
-#define TCR_HPD1 (UL(1) << TCR_HPD1_SHIFT)
-#define TCR_TBID0 (UL(1) << 51)
-#define TCR_TBID1 (UL(1) << 52)
-#define TCR_NFD0 (UL(1) << 53)
-#define TCR_NFD1 (UL(1) << 54)
-#define TCR_E0PD0 (UL(1) << 55)
-#define TCR_E0PD1 (UL(1) << 56)
-#define TCR_TCMA0 (UL(1) << 57)
-#define TCR_TCMA1 (UL(1) << 58)
-#define TCR_DS (UL(1) << 59)
+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_EL1_T0SZ_SHIFT)
+#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_EL1_T1SZ_SHIFT)
+
+#define TCR_T0SZ_MASK TCR_EL1_T0SZ_MASK
+#define TCR_T1SZ_MASK TCR_EL1_T1SZ_MASK
+
+#define TCR_EPD0_MASK TCR_EL1_EPD0_MASK
+#define TCR_EPD1_MASK TCR_EL1_EPD1_MASK
+
+#define TCR_IRGN0_MASK TCR_EL1_IRGN0_MASK
+#define TCR_IRGN0_WBWA (TCR_EL1_IRGN0_WBWA << TCR_EL1_IRGN0_SHIFT)
+
+#define TCR_ORGN0_MASK TCR_EL1_ORGN0_MASK
+#define TCR_ORGN0_WBWA (TCR_EL1_ORGN0_WBWA << TCR_EL1_ORGN0_SHIFT)
+
+#define TCR_SH0_MASK TCR_EL1_SH0_MASK
+#define TCR_SH0_INNER (TCR_EL1_SH0_INNER << TCR_EL1_SH0_SHIFT)
+
+#define TCR_SH1_MASK TCR_EL1_SH1_MASK
+
+#define TCR_TG0_SHIFT TCR_EL1_TG0_SHIFT
+#define TCR_TG0_MASK TCR_EL1_TG0_MASK
+#define TCR_TG0_4K (TCR_EL1_TG0_4K << TCR_EL1_TG0_SHIFT)
+#define TCR_TG0_64K (TCR_EL1_TG0_64K << TCR_EL1_TG0_SHIFT)
+#define TCR_TG0_16K (TCR_EL1_TG0_16K << TCR_EL1_TG0_SHIFT)
+
+#define TCR_TG1_SHIFT TCR_EL1_TG1_SHIFT
+#define TCR_TG1_MASK TCR_EL1_TG1_MASK
+#define TCR_TG1_16K (TCR_EL1_TG1_16K << TCR_EL1_TG1_SHIFT)
+#define TCR_TG1_4K (TCR_EL1_TG1_4K << TCR_EL1_TG1_SHIFT)
+#define TCR_TG1_64K (TCR_EL1_TG1_64K << TCR_EL1_TG1_SHIFT)
+
+#define TCR_IPS_SHIFT TCR_EL1_IPS_SHIFT
+#define TCR_IPS_MASK TCR_EL1_IPS_MASK
+#define TCR_A1 TCR_EL1_A1
+#define TCR_ASID16 TCR_EL1_AS
+#define TCR_TBI0 TCR_EL1_TBI0
+#define TCR_TBI1 TCR_EL1_TBI1
+#define TCR_HA TCR_EL1_HA
+#define TCR_HD TCR_EL1_HD
+#define TCR_HPD0 TCR_EL1_HPD0
+#define TCR_HPD1 TCR_EL1_HPD1
+#define TCR_TBID0 TCR_EL1_TBID0
+#define TCR_TBID1 TCR_EL1_TBID1
+#define TCR_E0PD0 TCR_EL1_E0PD0
+#define TCR_E0PD1 TCR_EL1_E0PD1
+#define TCR_DS TCR_EL1_DS
/*
* TTBR.
@@ -341,9 +285,12 @@
#endif
#ifdef CONFIG_ARM64_VA_BITS_52
+#define PTRS_PER_PGD_52_VA (UL(1) << (52 - PGDIR_SHIFT))
+#define PTRS_PER_PGD_48_VA (UL(1) << (48 - PGDIR_SHIFT))
+#define PTRS_PER_PGD_EXTRA (PTRS_PER_PGD_52_VA - PTRS_PER_PGD_48_VA)
+
/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
-#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
- (UL(1) << (48 - PGDIR_SHIFT))) * 8)
+#define TTBR1_BADDR_4852_OFFSET (PTRS_PER_PGD_EXTRA << PTDESC_ORDER)
#endif
#endif
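The rewritten macros are intended to be value-preserving; plugging in numbers confirms it. A stand-alone check, assuming 4K pages (PAGE_SHIFT = 12, so PTDESC_TABLE_SHIFT = 9) and a 48-bit VA:

#include <stdio.h>

#define PAGE_SHIFT		12	/* 4K pages assumed */
#define PTDESC_ORDER		3
#define PTDESC_TABLE_SHIFT	(PAGE_SHIFT - PTDESC_ORDER)	/* 9 */

#define ARM64_HW_PGTABLE_LEVELS(va_bits) \
	(((va_bits) - PTDESC_ORDER - 1) / PTDESC_TABLE_SHIFT)
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n) \
	(PTDESC_TABLE_SHIFT * (4 - (n)) + PTDESC_ORDER)

int main(void)
{
	/* DIV_ROUND_UP(48 - 12, 9) == 4 == (48 - 3 - 1) / 9 */
	printf("levels(48-bit VA) = %d\n", ARM64_HW_PGTABLE_LEVELS(48));
	/* Level 2 (PMD) entries map 1 << 21 = 2M with 4K pages */
	printf("PMD_SHIFT         = %d\n", ARM64_HW_PGTABLE_LEVEL_SHIFT(2));
	/* 1 << 9 = 512 entries per table */
	printf("PTRS_PER_PTE      = %d\n", 1 << PTDESC_TABLE_SHIFT);
	return 0;
}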
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 9f9cf13bbd95..212ce1b02e15 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -17,7 +17,6 @@
#define PTE_SWP_EXCLUSIVE (_AT(pteval_t, 1) << 2) /* only for swp ptes */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-#define PTE_DEVMAP (_AT(pteval_t, 1) << 57)
/*
* PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
@@ -26,6 +25,8 @@
*/
#define PTE_PRESENT_INVALID (PTE_NG) /* only when !PTE_VALID */
+#define PTE_PRESENT_VALID_KERNEL (PTE_VALID | PTE_MAYBE_NG)
+
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define PTE_UFFD_WP (_AT(pteval_t, 1) << 58) /* uffd-wp tracking */
#define PTE_SWP_UFFD_WP (_AT(pteval_t, 1) << 3) /* only for swp ptes */
@@ -51,11 +52,11 @@
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-#define _PAGE_KERNEL (PROT_NORMAL)
-#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
-#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
-#define _PAGE_KERNEL_EXEC (PROT_NORMAL & ~PTE_PXN)
-#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
+#define _PAGE_KERNEL (PROT_NORMAL | PTE_DIRTY)
+#define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY | PTE_DIRTY)
+#define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY | PTE_DIRTY)
+#define _PAGE_KERNEL_EXEC ((PROT_NORMAL & ~PTE_PXN) | PTE_DIRTY)
+#define _PAGE_KERNEL_EXEC_CONT ((PROT_NORMAL & ~PTE_PXN) | PTE_CONT | PTE_DIRTY)
#define _PAGE_SHARED (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define _PAGE_SHARED_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
@@ -63,7 +64,7 @@
#define _PAGE_READONLY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define _PAGE_EXECONLY (_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/cpufeature.h>
#include <asm/pgtable-types.h>
@@ -81,17 +82,24 @@ extern unsigned long prot_ns_shared;
#define lpa2_is_enabled() false
#define PTE_MAYBE_SHARED PTE_SHARED
#define PMD_MAYBE_SHARED PMD_SECT_S
+#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#else
static inline bool __pure lpa2_is_enabled(void)
{
- return read_tcr() & TCR_DS;
+ return read_tcr() & TCR_EL1_DS;
}
#define PTE_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PTE_SHARED)
#define PMD_MAYBE_SHARED (lpa2_is_enabled() ? 0 : PMD_SECT_S)
+#define PHYS_MASK_SHIFT (lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
#endif
/*
+ * Highest possible physical address supported.
+ */
+#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
+
+/*
* If we have userspace only BTI we don't want to mark kernel pages
* guarded even if the system does support BTI.
*/
@@ -103,10 +111,10 @@ static inline bool __pure lpa2_is_enabled(void)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_KERNEL_EXEC_CONT)
-#define PAGE_S2_MEMATTR(attr, has_fwb) \
+#define PAGE_S2_MEMATTR(attr) \
({ \
u64 __val; \
- if (has_fwb) \
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) \
__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \
else \
__val = PTE_S2_MEMATTR(MT_S2_ ## attr); \
@@ -121,7 +129,7 @@ static inline bool __pure lpa2_is_enabled(void)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_READONLY_EXEC)
#define PAGE_EXECONLY __pgprot(_PAGE_EXECONLY)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#define pte_pi_index(pte) ( \
((pte & BIT(PTE_PI_IDX_3)) >> (PTE_PI_IDX_3 - 3)) | \
@@ -158,29 +166,26 @@ static inline bool __pure lpa2_is_enabled(void)
#define _PAGE_GCS (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_WRITE | PTE_USER)
#define _PAGE_GCS_RO (_PAGE_DEFAULT | PTE_NG | PTE_UXN | PTE_USER)
-#define PAGE_GCS __pgprot(_PAGE_GCS)
-#define PAGE_GCS_RO __pgprot(_PAGE_GCS_RO)
-
#define PIE_E0 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW_O))
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_GCS) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_EXECONLY), PIE_X_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RWX_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY), PIE_R_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED), PIE_RW_O))
#define PIE_E1 ( \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_GCS_RO), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED), PIE_RW) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_ROX), PIE_RX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_EXEC), PIE_RWX) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_RO), PIE_R) | \
- PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL), PIE_RW))
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_GCS_RO), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_EXECONLY), PIE_NONE_O) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED_EXEC), PIE_RW) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_READONLY), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_SHARED), PIE_RW) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_ROX), PIE_RX) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_EXEC), PIE_RWX) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL_RO), PIE_R) | \
+ PIRx_ELx_PERM_PREP(pte_pi_index(_PAGE_KERNEL), PIE_RW))
#endif /* __ASM_PGTABLE_PROT_H */
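Moving PHYS_MASK_SHIFT next to lpa2_is_enabled() makes the supported physical-address width a runtime property on LPA2-capable configurations: 52 bits only once TCR_EL1.DS is set, otherwise 48. A small illustration of the two masks that can result, assuming CONFIG_ARM64_PA_BITS=52:

#include <stdint.h>
#include <stdio.h>

static uint64_t phys_mask(unsigned int pa_bits)
{
	return (UINT64_C(1) << pa_bits) - 1;
}

int main(void)
{
	/* TCR_EL1.DS set: the full 52-bit PA space is addressable */
	printf("LPA2 enabled:  %#llx\n", (unsigned long long)phys_mask(52));
	/* TCR_EL1.DS clear: fall back to a 48-bit mask */
	printf("LPA2 disabled: %#llx\n", (unsigned long long)phys_mask(48));
	return 0;
}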
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
index 6d6d4065b0cb..265e8301d7ba 100644
--- a/arch/arm64/include/asm/pgtable-types.h
+++ b/arch/arm64/include/asm/pgtable-types.h
@@ -11,11 +11,19 @@
#include <asm/types.h>
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pudval_t;
-typedef u64 p4dval_t;
-typedef u64 pgdval_t;
+/*
+ * Page Table Descriptor
+ *
+ * Generic page table descriptor format from which
+ * all level specific descriptors can be derived.
+ */
+typedef u64 ptdesc_t;
+
+typedef ptdesc_t pteval_t;
+typedef ptdesc_t pmdval_t;
+typedef ptdesc_t pudval_t;
+typedef ptdesc_t p4dval_t;
+typedef ptdesc_t pgdval_t;
/*
* These are used to make use of C type-checking..
@@ -46,7 +54,7 @@ typedef struct { pgdval_t pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#define __pgd(x) ((pgd_t) { (x) } )
-typedef struct { pteval_t pgprot; } pgprot_t;
+typedef struct { ptdesc_t pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 6986345b537a..9029b81ccbe8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -30,7 +30,7 @@
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
@@ -40,38 +40,76 @@
#include <linux/sched.h>
#include <linux/page_table_check.h>
+static inline void emit_pte_barriers(void)
+{
+ /*
+ * These barriers are emitted under certain conditions after a pte entry
+ * was modified (see e.g. __set_pte_complete()). The dsb makes the store
+ * visible to the table walker. The isb ensures that any previous
+ * speculative "invalid translation" marker that is in the CPU's
+ * pipeline gets cleared, so that any access to that address after
+ * setting the pte to valid won't cause a spurious fault. If the thread
+ * gets preempted after storing to the pgtable but before emitting these
+ * barriers, __switch_to() emits a dsb which ensure the walker gets to
+ * see the store. There is no guarantee of an isb being issued though.
+ * This is safe because it will still get issued (albeit on a
+ * potentially different CPU) when the thread starts running again,
+ * before any access to the address.
+ */
+ dsb(ishst);
+ isb();
+}
+
+static inline void queue_pte_barriers(void)
+{
+ if (is_lazy_mmu_mode_active()) {
+ /* Avoid the atomic op if already set. */
+ if (!test_thread_flag(TIF_LAZY_MMU_PENDING))
+ set_thread_flag(TIF_LAZY_MMU_PENDING);
+ } else {
+ emit_pte_barriers();
+ }
+}
+
+static inline void arch_enter_lazy_mmu_mode(void) {}
+
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+ if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
+ emit_pte_barriers();
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+ arch_flush_lazy_mmu_mode();
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/* Set stride and tlb_level in flush_*_tlb_range */
#define flush_pmd_tlb_range(vma, addr, end) \
- __flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2)
+ __flush_tlb_range(vma, addr, end, PMD_SIZE, 2, TLBF_NONE)
#define flush_pud_tlb_range(vma, addr, end) \
- __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
+ __flush_tlb_range(vma, addr, end, PUD_SIZE, 1, TLBF_NONE)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Outside of a few very special situations (e.g. hibernation), we always
- * use broadcast TLB invalidation instructions, therefore a spurious page
- * fault on one CPU which has been handled concurrently by another CPU
- * does not need to perform additional invalidation.
+ * We use a local TLB invalidation instruction when reusing a page in the
+ * write protection fault handler to avoid a TLBI broadcast in the hot
+ * path. This will cause spurious page faults if stale read-only TLB
+ * entries exist.
*/
-#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
+#define flush_tlb_fix_spurious_fault(vma, address, ptep) \
+ __flush_tlb_page(vma, address, TLBF_NOBROADCAST | TLBF_NONOTIFY)
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page))
+#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) \
+ __flush_tlb_range(vma, address, address + PMD_SIZE, PMD_SIZE, 2, \
+ TLBF_NOBROADCAST | TLBF_NONOTIFY | TLBF_NOWALKCACHE)
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016llx.\n", __FILE__, __LINE__, pte_val(e))
-/*
- * Macros to convert between a physical address and its placement in a
- * page table entry, taking care of 52-bit addresses.
- */
#ifdef CONFIG_ARM64_PA_BITS_52
static inline phys_addr_t __pte_to_phys(pte_t pte)
{
@@ -84,8 +122,15 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
return (phys | (phys >> PTE_ADDR_HIGH_SHIFT)) & PHYS_TO_PTE_ADDR_MASK;
}
#else
-#define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_LOW)
-#define __phys_to_pte_val(phys) (phys)
+static inline phys_addr_t __pte_to_phys(pte_t pte)
+{
+ return pte_val(pte) & PTE_ADDR_LOW;
+}
+
+static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
+{
+ return phys;
+}
#endif
#define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT)
@@ -93,8 +138,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
__pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pte_none(pte) (!pte_val(pte))
-#define __pte_clear(mm, addr, ptep) \
- __set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
/*
@@ -108,7 +151,6 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))
#define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
-#define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP))
#define pte_tagged(pte) ((pte_val(pte) & PTE_ATTRINDX_MASK) == \
PTE_ATTRINDX(MT_NORMAL_TAGGED))
@@ -212,7 +254,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
- pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+ if (pte_sw_dirty(pte))
+ pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
return pte;
}
@@ -273,9 +316,11 @@ static inline pte_t pte_mknoncont(pte_t pte)
return clear_pte_bit(pte, __pgprot(PTE_CONT));
}
-static inline pte_t pte_mkpresent(pte_t pte)
+static inline pte_t pte_mkvalid_k(pte_t pte)
{
- return set_pte_bit(pte, __pgprot(PTE_VALID));
+ pte = clear_pte_bit(pte, __pgprot(PTE_PRESENT_INVALID));
+ pte = set_pte_bit(pte, __pgprot(PTE_PRESENT_VALID_KERNEL));
+ return pte;
}
static inline pte_t pte_mkinvalid(pte_t pte)
@@ -290,9 +335,9 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
-static inline pte_t pte_mkdevmap(pte_t pte)
+static inline pmd_t pmd_mknoncont(pmd_t pmd)
{
- return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL));
+ return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT);
}
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
@@ -317,18 +362,20 @@ static inline void __set_pte_nosync(pte_t *ptep, pte_t pte)
WRITE_ONCE(*ptep, pte);
}
-static inline void __set_pte(pte_t *ptep, pte_t pte)
+static inline void __set_pte_complete(pte_t pte)
{
- __set_pte_nosync(ptep, pte);
-
/*
* Only if the new pte is valid and kernel, otherwise TLB maintenance
- * or update_mmu_cache() have the necessary barriers.
+ * has the necessary barriers.
*/
- if (pte_valid_not_user(pte)) {
- dsb(ishst);
- isb();
- }
+ if (pte_valid_not_user(pte))
+ queue_pte_barriers();
+}
+
+static inline void __set_pte(pte_t *ptep, pte_t pte)
+{
+ __set_pte_nosync(ptep, pte);
+ __set_pte_complete(pte);
}
static inline pte_t __ptep_get(pte_t *ptep)
@@ -349,7 +396,7 @@ bool pgattr_change_is_safe(pteval_t old, pteval_t new);
* 1 0 | 1 0 1
* 1 1 | 0 1 x
*
- * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
* the page fault mechanism. Checking the dirty status of a pte becomes:
*
* PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
@@ -420,23 +467,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte));
}
-static inline void __set_ptes(struct mm_struct *mm,
- unsigned long __always_unused addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
-{
- page_table_check_ptes_set(mm, ptep, pte, nr);
- __sync_cache_and_tags(pte, nr);
-
- for (;;) {
- __check_safe_pte_update(mm, ptep, pte);
- __set_pte(ptep, pte);
- if (--nr == 0)
- break;
- ptep++;
- pte = pte_advance_pfn(pte, 1);
- }
-}
-
/*
* Hugetlb definitions.
*/
@@ -483,12 +513,12 @@ static inline pmd_t pte_pmd(pte_t pte)
static inline pgprot_t mk_pud_sect_prot(pgprot_t prot)
{
- return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT);
+ return __pgprot((pgprot_val(prot) & ~PUD_TYPE_MASK) | PUD_TYPE_SECT);
}
static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
{
- return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
+ return __pgprot((pgprot_val(prot) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT);
}
static inline pte_t pte_swp_mkexclusive(pte_t pte)
@@ -496,7 +526,7 @@ static inline pte_t pte_swp_mkexclusive(pte_t pte)
return set_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}
-static inline int pte_swp_exclusive(pte_t pte)
+static inline bool pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & PTE_SWP_EXCLUSIVE;
}
@@ -532,7 +562,7 @@ static inline int pte_protnone(pte_t pte)
/*
* pte_present_invalid() tells us that the pte is invalid from HW
* perspective but present from SW perspective, so the fields are to be
- * interpretted as per the HW layout. The second 2 checks are the unique
+ * interpreted as per the HW layout. The second 2 checks are the unique
* encoding that we use for PROT_NONE. It is insufficient to only use
* the first check because we share the same encoding scheme with pmds
* which support pmd_mkinvalid(), so can be present-invalid without
@@ -548,18 +578,6 @@ static inline int pmd_protnone(pmd_t pmd)
#endif
#define pmd_present(pmd) pte_present(pmd_pte(pmd))
-
-/*
- * THP definitions.
- */
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline int pmd_trans_huge(pmd_t pmd)
-{
- return pmd_val(pmd) && pmd_present(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
@@ -572,6 +590,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkvalid_k(pmd) pte_pmd(pte_mkvalid_k(pmd_pte(pmd)))
#define pmd_mkinvalid(pmd) pte_pmd(pte_mkinvalid(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define pmd_uffd_wp(pmd) pte_uffd_wp(pmd_pte(pmd))
@@ -585,14 +604,17 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
-#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd))
-#endif
-static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
- return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP)));
+ /*
+ * It's possible that the pmd is present-invalid on entry
+ * and in that case it needs to remain present-invalid on
+ * exit. So ensure the VALID bit does not get modified.
+ */
+ pmdval_t mask = PMD_TYPE_MASK & ~PTE_VALID;
+ pmdval_t val = PMD_TYPE_SECT & ~PTE_VALID;
+
+ return __pmd((pmd_val(pmd) & ~mask) | val);
}
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
@@ -607,24 +629,31 @@ static inline pmd_t pmd_mkspecial(pmd_t pmd)
#define __phys_to_pmd_val(phys) __phys_to_pte_val(phys)
#define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
#define pud_young(pud) pte_young(pud_pte(pud))
#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
+#define pud_mkwrite_novma(pud) pte_pud(pte_mkwrite_novma(pud_pte(pud)))
+#define pud_mkvalid_k(pud) pte_pud(pte_mkvalid_k(pud_pte(pud)))
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+ /*
+ * It's possible that the pud is present-invalid on entry
+ * and in that case it needs to remain present-invalid on
+ * exit. So ensure the VALID bit does not get modified.
+ */
+ pudval_t mask = PUD_TYPE_MASK & ~PTE_VALID;
+ pudval_t val = PUD_TYPE_SECT & ~PTE_VALID;
+
+ return __pud((pud_val(pud) & ~mask) | val);
+}
#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
-#define pud_special(pte) pte_special(pud_pte(pud))
-#define pud_mkspecial(pte) pte_pud(pte_mkspecial(pud_pte(pud)))
-#endif
-
#define pmd_pgprot pmd_pgprot
static inline pgprot_t pmd_pgprot(pmd_t pmd)
{
@@ -641,30 +670,63 @@ static inline pgprot_t pud_pgprot(pud_t pud)
return __pgprot(pud_val(pfn_pud(pfn, __pgprot(0))) ^ pud_val(pud));
}
-static inline void __set_pte_at(struct mm_struct *mm,
- unsigned long __always_unused addr,
- pte_t *ptep, pte_t pte, unsigned int nr)
+static inline void __set_ptes_anysz(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr,
+ unsigned long pgsize)
{
- __sync_cache_and_tags(pte, nr);
- __check_safe_pte_update(mm, ptep, pte);
- __set_pte(ptep, pte);
+ unsigned long stride = pgsize >> PAGE_SHIFT;
+
+ switch (pgsize) {
+ case PAGE_SIZE:
+ page_table_check_ptes_set(mm, addr, ptep, pte, nr);
+ break;
+ case PMD_SIZE:
+ page_table_check_pmds_set(mm, addr, (pmd_t *)ptep,
+ pte_pmd(pte), nr);
+ break;
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ page_table_check_puds_set(mm, addr, (pud_t *)ptep,
+ pte_pud(pte), nr);
+ break;
+#endif
+ default:
+ VM_WARN_ON(1);
+ }
+
+ __sync_cache_and_tags(pte, nr * stride);
+
+ for (;;) {
+ __check_safe_pte_update(mm, ptep, pte);
+ __set_pte_nosync(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = pte_advance_pfn(pte, stride);
+ }
+
+ __set_pte_complete(pte);
}
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, pmd_t pmd)
+static inline void __set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
- page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd),
- PMD_SIZE >> PAGE_SHIFT);
+ __set_ptes_anysz(mm, addr, ptep, pte, nr, PAGE_SIZE);
}
-static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
- pud_t *pudp, pud_t pud)
+static inline void __set_pmds(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd, unsigned int nr)
{
- page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud),
- PUD_SIZE >> PAGE_SHIFT);
+ __set_ptes_anysz(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), nr, PMD_SIZE);
}
+#define set_pmd_at(mm, addr, pmdp, pmd) __set_pmds(mm, addr, pmdp, pmd, 1)
+
+static inline void __set_puds(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud, unsigned int nr)
+{
+ __set_ptes_anysz(mm, addr, (pte_t *)pudp, pud_pte(pud), nr, PUD_SIZE);
+}
+#define set_pud_at(mm, addr, pudp, pud) __set_puds(mm, addr, pudp, pud, 1)
#define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d))
#define __phys_to_p4d_val(phys) __phys_to_pte_val(phys)
@@ -716,26 +778,36 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
-#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
- PMD_TYPE_SECT)
-#define pmd_leaf(pmd) (pmd_present(pmd) && !pmd_table(pmd))
+
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
+{
+ return pmd_present(pmd) && !pmd_table(pmd);
+}
+
#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ /*
+ * If pmd is present-invalid, pmd_table() won't detect it
+ * as a table, so force the valid bit for the comparison.
+ */
+ return pmd_present(pmd) && !pmd_table(__pmd(pmd_val(pmd) | PTE_VALID));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
#else
-#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
- PUD_TYPE_SECT)
#define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_TABLE)
#endif
-extern pgd_t init_pg_dir[];
-extern pgd_t init_pg_end[];
extern pgd_t swapper_pg_dir[];
extern pgd_t idmap_pg_dir[];
extern pgd_t tramp_pg_dir[];
@@ -760,10 +832,8 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
WRITE_ONCE(*pmdp, pmd);
- if (pmd_valid(pmd)) {
- dsb(ishst);
- isb();
- }
+ if (pmd_valid(pmd))
+ queue_pte_barriers();
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -793,22 +863,21 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
-
#if CONFIG_PGTABLE_LEVELS > 2
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!pud_table(pud))
+#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
+ PUD_TYPE_TABLE)
#define pud_present(pud) pte_present(pud_pte(pud))
#ifndef __PAGETABLE_PMD_FOLDED
-#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
+{
+ return pud_present(pud) && !pud_table(pud);
+}
#else
#define pud_leaf(pud) false
#endif
@@ -827,10 +896,8 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
WRITE_ONCE(*pudp, pud);
- if (pud_valid(pud)) {
- dsb(ishst);
- isb();
- }
+ if (pud_valid(pud))
+ queue_pte_barriers();
}
static inline void pud_clear(pud_t *pudp)
@@ -896,7 +963,9 @@ static inline bool mm_pud_folded(const struct mm_struct *mm)
pr_err("%s:%d: bad pud %016llx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_none(p4d) (pgtable_l4_enabled() && !p4d_val(p4d))
-#define p4d_bad(p4d) (pgtable_l4_enabled() && !(p4d_val(p4d) & 2))
+#define p4d_bad(p4d) (pgtable_l4_enabled() && \
+ ((p4d_val(p4d) & P4D_TYPE_MASK) != \
+ P4D_TYPE_TABLE))
#define p4d_present(p4d) (!p4d_none(p4d))
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -907,8 +976,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
}
WRITE_ONCE(*p4dp, p4d);
- dsb(ishst);
- isb();
+ queue_pte_barriers();
}
static inline void p4d_clear(p4d_t *p4dp)
@@ -1023,7 +1091,9 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm)
pr_err("%s:%d: bad p4d %016llx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_none(pgd) (pgtable_l5_enabled() && !pgd_val(pgd))
-#define pgd_bad(pgd) (pgtable_l5_enabled() && !(pgd_val(pgd) & 2))
+#define pgd_bad(pgd) (pgtable_l5_enabled() && \
+ ((pgd_val(pgd) & PGD_TYPE_MASK) != \
+ PGD_TYPE_TABLE))
#define pgd_present(pgd) (!pgd_none(pgd))
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -1034,8 +1104,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
}
WRITE_ONCE(*pgdp, pgd);
- dsb(ishst);
- isb();
+ queue_pte_barriers();
}
static inline void pgd_clear(pgd_t *pgdp)
@@ -1182,9 +1251,18 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
-extern int __ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty);
+extern int __ptep_set_access_flags_anysz(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty,
+ unsigned long pgsize);
+
+static inline int __ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ return __ptep_set_access_flags_anysz(vma, address, ptep, entry, dirty,
+ PAGE_SIZE);
+}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
@@ -1192,33 +1270,23 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty)
{
- return __ptep_set_access_flags(vma, address, (pte_t *)pmdp,
- pmd_pte(entry), dirty);
-}
-
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
+ return __ptep_set_access_flags_anysz(vma, address, (pte_t *)pmdp,
+ pmd_pte(entry), dirty, PMD_SIZE);
}
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
-static inline bool pte_user_accessible_page(pte_t pte)
+static inline bool pte_user_accessible_page(pte_t pte, unsigned long addr)
{
return pte_valid(pte) && (pte_user(pte) || pte_user_exec(pte));
}
-static inline bool pmd_user_accessible_page(pmd_t pmd)
+static inline bool pmd_user_accessible_page(pmd_t pmd, unsigned long addr)
{
return pmd_valid(pmd) && !pmd_table(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
}
-static inline bool pud_user_accessible_page(pud_t pud)
+static inline bool pud_user_accessible_page(pud_t pud, unsigned long addr)
{
return pud_valid(pud) && !pud_table(pud) && (pud_user(pud) || pud_user_exec(pud));
}
@@ -1227,9 +1295,15 @@ static inline bool pud_user_accessible_page(pud_t pud)
/*
* Atomic pte/pmd modifications.
*/
-static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
+
+static inline void __pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ __set_pte(ptep, __pte(0));
+}
+
+static inline bool __ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
pte_t old_pte, pte;
@@ -1244,10 +1318,10 @@ static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
return pte_young(pte);
}
-static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline bool __ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
- int young = __ptep_test_and_clear_young(vma, address, ptep);
+ bool young = __ptep_test_and_clear_young(vma, address, ptep);
if (young) {
/*
@@ -1258,7 +1332,7 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
* context-switch, which provides a DSB to complete the TLB
* invalidation.
*/
- flush_tlb_page_nosync(vma, address);
+ __flush_tlb_page(vma, address, TLBF_NOSYNC);
}
return young;
@@ -1266,9 +1340,8 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pmd_t *pmdp)
+static inline bool pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
{
/* Operation applies to PMD table entry only if FEAT_HAFT is enabled */
VM_WARN_ON(pmd_table(READ_ONCE(*pmdp)) && !system_supports_haft());
@@ -1276,16 +1349,38 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
-static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
+static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
+ unsigned long address,
+ pte_t *ptep,
+ unsigned long pgsize)
{
pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
- page_table_check_pte_clear(mm, pte);
+ switch (pgsize) {
+ case PAGE_SIZE:
+ page_table_check_pte_clear(mm, address, pte);
+ break;
+ case PMD_SIZE:
+ page_table_check_pmd_clear(mm, address, pte_pmd(pte));
+ break;
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ page_table_check_pud_clear(mm, address, pte_pud(pte));
+ break;
+#endif
+ default:
+ VM_WARN_ON(1);
+ }
return pte;
}
+static inline pte_t __ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __ptep_get_and_clear_anysz(mm, address, ptep, PAGE_SIZE);
+}
+
static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr, int full)
{
@@ -1322,11 +1417,7 @@ static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm,
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
- pmd_t pmd = __pmd(xchg_relaxed(&pmd_val(*pmdp), 0));
-
- page_table_check_pmd_clear(mm, pmd);
-
- return pmd;
+ return pte_pmd(__ptep_get_and_clear_anysz(mm, address, (pte_t *)pmdp, PMD_SIZE));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1345,7 +1436,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
}
/*
- * __ptep_set_wrprotect - mark read-only while trasferring potential hardware
+ * __ptep_set_wrprotect - mark read-only while transferring potential hardware
* dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
*/
static inline void __ptep_set_wrprotect(struct mm_struct *mm,
@@ -1415,7 +1506,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
- page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif
@@ -1519,6 +1610,14 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
#define arch_wants_old_prefaulted_pte cpu_has_hw_af
+/*
+ * Request that exec memory is read into the pagecache in at least 64K
+ * folios. This size can be contpte-mapped when 4K base pages are in use
+ * (16 pages into 1 iTLB entry), and HPA can coalesce it (4 pages into 1
+ * TLB entry) when 16K base pages are in use.
+ */
+#define exec_folio_order() ilog2(SZ_64K >> PAGE_SHIFT)
+
static inline bool pud_sect_supported(void)
{
return PAGE_SIZE == SZ_4K;
@@ -1535,6 +1634,16 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t new_pte);
+#define modify_prot_start_ptes modify_prot_start_ptes
+extern pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ unsigned int nr);
+
+#define modify_prot_commit_ptes modify_prot_commit_ptes
+extern void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte,
+ unsigned int nr);
+
#ifdef CONFIG_ARM64_CONTPTE
/*
@@ -1555,10 +1664,10 @@ extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr,
extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
unsigned int nr, int full);
-extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
-extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep);
+bool contpte_test_and_clear_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
+bool contpte_clear_flush_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr);
extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned int nr);
extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma,
@@ -1721,28 +1830,43 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return __ptep_get_and_clear(mm, addr, ptep);
}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+#define test_and_clear_young_ptes test_and_clear_young_ptes
+static inline bool test_and_clear_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
{
- pte_t orig_pte = __ptep_get(ptep);
-
- if (likely(!pte_valid_cont(orig_pte)))
+ if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
return __ptep_test_and_clear_young(vma, addr, ptep);
- return contpte_ptep_test_and_clear_young(vma, addr, ptep);
+ return contpte_test_and_clear_young_ptes(vma, addr, ptep, nr);
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline bool ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return test_and_clear_young_ptes(vma, addr, ptep, 1);
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline bool ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
pte_t orig_pte = __ptep_get(ptep);
if (likely(!pte_valid_cont(orig_pte)))
return __ptep_clear_flush_young(vma, addr, ptep);
- return contpte_ptep_clear_flush_young(vma, addr, ptep);
+ return contpte_clear_flush_young_ptes(vma, addr, ptep, 1);
+}
+
+#define clear_flush_young_ptes clear_flush_young_ptes
+static inline bool clear_flush_young_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ if (likely(nr == 1 && !pte_cont(__ptep_get(ptep))))
+ return __ptep_clear_flush_young(vma, addr, ptep);
+
+ return contpte_clear_flush_young_ptes(vma, addr, ptep, nr);
}
#define wrprotect_ptes wrprotect_ptes
@@ -1825,6 +1949,6 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
#endif /* CONFIG_ARM64_CONTPTE */
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_PGTABLE_H */
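[Editor's note] A quick numeric check of the exec_folio_order() addition above, as a hedged illustration only (the wrapper name below is hypothetical and not part of the patch): with 4K base pages the order is ilog2(SZ_64K >> 12) = 4, i.e. a 16-page folio that contpte can map with a single iTLB entry; with 16K base pages it is 2, i.e. 4 pages that HPA can coalesce.

/* Illustration only: number of pages in the requested exec readahead folio. */
static inline unsigned int example_exec_folio_pages(void)
{
	/* 16 with 4K base pages, 4 with 16K base pages. */
	return 1U << exec_folio_order();
}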
diff --git a/arch/arm64/include/asm/pkeys.h b/arch/arm64/include/asm/pkeys.h
index 0ca5f83ce148..cd286d3a9c94 100644
--- a/arch/arm64/include/asm/pkeys.h
+++ b/arch/arm64/include/asm/pkeys.h
@@ -12,8 +12,7 @@
#define arch_max_pkey() 8
-int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
- unsigned long init_val);
+int arch_set_user_pkey_access(int pkey, unsigned long init_val);
static inline bool arch_pkeys_enabled(void)
{
diff --git a/arch/arm64/include/asm/por.h b/arch/arm64/include/asm/por.h
index e06e9f473675..d913d5b529e4 100644
--- a/arch/arm64/include/asm/por.h
+++ b/arch/arm64/include/asm/por.h
@@ -6,26 +6,27 @@
#ifndef _ASM_ARM64_POR_H
#define _ASM_ARM64_POR_H
-#define POR_BITS_PER_PKEY 4
-#define POR_ELx_IDX(por_elx, idx) (((por_elx) >> ((idx) * POR_BITS_PER_PKEY)) & 0xf)
+#include <asm/sysreg.h>
+
+#define POR_EL0_INIT POR_ELx_PERM_PREP(0, POE_RWX)
static inline bool por_elx_allows_read(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_R;
}
static inline bool por_elx_allows_write(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_W;
}
static inline bool por_elx_allows_exec(u64 por, u8 pkey)
{
- u8 perm = POR_ELx_IDX(por, pkey);
+ u8 perm = POR_ELx_PERM_GET(pkey, por);
return perm & POE_X;
}
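[Editor's note] The reworked POE encodings and the POR_ELx_PERM_PREP/GET helpers compose with the accessors above as in this minimal sketch; the function name is hypothetical and not part of the patch.

/* Illustration only: POR_EL0_INIT grants pkey 0 RWX; all other keys stay at POE_NONE. */
static inline bool pkey0_is_rwx(void)
{
	u64 por = POR_ELx_PERM_PREP(0, POE_RWX);	/* same value as POR_EL0_INIT */

	return por_elx_allows_read(por, 0) &&
	       por_elx_allows_write(por, 0) &&
	       por_elx_allows_exec(por, 0);
}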
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..932ea4b62042 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -2,7 +2,6 @@
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H
-#include <linux/jump_label.h>
#include <linux/thread_info.h>
#define PREEMPT_NEED_RESCHED BIT(32)
@@ -87,7 +86,6 @@ void preempt_schedule_notrace(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_preempt_schedule(void);
#define __preempt_schedule() dynamic_preempt_schedule()
void dynamic_preempt_schedule_notrace(void);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 0d5d1f0525eb..ab78a78821a2 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -9,7 +9,7 @@
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/page.h>
@@ -21,5 +21,5 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 1bf1a3b16e88..e30c4c8e3a7a 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -23,7 +23,9 @@
#define MTE_CTRL_TCF_ASYNC (1UL << 17)
#define MTE_CTRL_TCF_ASYMM (1UL << 18)
-#ifndef __ASSEMBLY__
+#define MTE_CTRL_STORE_ONLY (1UL << 19)
+
+#ifndef __ASSEMBLER__
#include <linux/build_bug.h>
#include <linux/cache.h>
@@ -170,7 +172,12 @@ struct thread_struct {
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
- struct user_fpsimd_state kernel_fpsimd_state;
+ /*
+ * Set [cleared] by kernel_neon_begin() [kernel_neon_end()] to the
+ * address of a caller provided buffer that will be used to preserve a
+ * task's kernel mode FPSIMD state while it is scheduled out.
+ */
+ struct user_fpsimd_state *kernel_fpsimd_state;
unsigned int kernel_fpsimd_cpu;
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_user keys_user;
@@ -435,5 +442,5 @@ int set_tsc_mode(unsigned int val);
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
#define SET_TSC_CTL(val) set_tsc_mode((val))
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 6cf4aae05219..baff24004459 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -7,7 +7,9 @@
#include <linux/ptdump.h>
-#ifdef CONFIG_PTDUMP_CORE
+DECLARE_STATIC_KEY_FALSE(arm64_ptdump_lock_key);
+
+#ifdef CONFIG_PTDUMP
#include <linux/mm_types.h>
#include <linux/seq_file.h>
@@ -24,8 +26,8 @@ struct ptdump_info {
};
struct ptdump_prot_bits {
- u64 mask;
- u64 val;
+ ptdesc_t mask;
+ ptdesc_t val;
const char *set;
const char *clear;
};
@@ -34,7 +36,7 @@ struct ptdump_pg_level {
const struct ptdump_prot_bits *bits;
char name[4];
int num;
- u64 mask;
+ ptdesc_t mask;
};
/*
@@ -51,7 +53,7 @@ struct ptdump_pg_state {
const struct mm_struct *mm;
unsigned long start_address;
int level;
- u64 current_prot;
+ ptdesc_t current_prot;
bool check_wx;
unsigned long wx_pages;
unsigned long uxn_pages;
@@ -59,7 +61,13 @@ struct ptdump_pg_state {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
- u64 val);
+ pteval_t val);
+void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte);
+void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd);
+void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud);
+void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d);
+void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd);
+void note_page_flush(struct ptdump_state *st);
#ifdef CONFIG_PTDUMP_DEBUGFS
#define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64
void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
@@ -69,7 +77,13 @@ static inline void ptdump_debugfs_register(struct ptdump_info *info,
#endif /* CONFIG_PTDUMP_DEBUGFS */
#else
static inline void note_page(struct ptdump_state *pt_st, unsigned long addr,
- int level, u64 val) { }
-#endif /* CONFIG_PTDUMP_CORE */
+ int level, pteval_t val) { }
+static inline void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte) { }
+static inline void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd) { }
+static inline void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud) { }
+static inline void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { }
+static inline void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd) { }
+static inline void note_page_flush(struct ptdump_state *st) { }
+#endif /* CONFIG_PTDUMP */
#endif /* __ASM_PTDUMP_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 47ff8654c5ec..39582511ad72 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -94,7 +94,7 @@
*/
#define NO_SYSCALL (-1)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bug.h>
#include <linux/types.h>
@@ -169,10 +169,6 @@ struct pt_regs {
u64 sdei_ttbr1;
struct frame_record_meta stackframe;
-
- /* Only valid for some EL1 exceptions. */
- u64 lockdep_hardirqs;
- u64 exit_rcu;
};
/* For correct stack alignment, pt_regs has to be a multiple of 16 bytes. */
@@ -214,11 +210,12 @@ static inline void forget_syscall(struct pt_regs *regs)
(regs)->pmr == GIC_PRIO_IRQON : \
true)
-#define interrupts_enabled(regs) \
- (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))
+static __always_inline bool regs_irqs_disabled(const struct pt_regs *regs)
+{
+ return (regs->pstate & PSR_I_BIT) || !irqs_priority_unmasked(regs);
+}
-#define fast_interrupts_enabled(regs) \
- (!((regs)->pstate & PSR_F_BIT))
+#define interrupts_enabled(regs) (!regs_irqs_disabled(regs))
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
@@ -364,5 +361,5 @@ static inline void procedure_link_pointer_set(struct pt_regs *regs,
extern unsigned long profile_pc(struct pt_regs *regs);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif
diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h
new file mode 100644
index 000000000000..b506e95cf6e3
--- /dev/null
+++ b/arch/arm64/include/asm/resctrl.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/arm_mpam.h>
diff --git a/arch/arm64/include/asm/rqspinlock.h b/arch/arm64/include/asm/rqspinlock.h
new file mode 100644
index 000000000000..9ea0a74e5892
--- /dev/null
+++ b/arch/arm64/include/asm/rqspinlock.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RQSPINLOCK_H
+#define _ASM_RQSPINLOCK_H
+
+#include <asm/barrier.h>
+
+/*
+ * Hardcode res_smp_cond_load_acquire implementations for arm64 to a custom
+ * version based on [0]. In rqspinlock code, our conditional expression involves
+ * checking the value _and_ additionally a timeout. However, on arm64, the
+ * WFE-based implementation may never spin again if no stores occur to the
+ * locked byte in the lock word. As such, we may be stuck forever if
+ * event-stream based unblocking is not available on the platform for WFE spin
+ * loops (arch_timer_evtstrm_available).
+ *
+ * Once support for smp_cond_load_acquire_timewait [0] lands, we can drop this
+ * copy-paste.
+ *
+ * While we rely on the implementation to amortize the cost of sampling
+ * cond_expr for us, that does not happen when event stream support is
+ * unavailable; in that case it is the time_expr check that gets amortized
+ * instead. This is not the common case, and it would be difficult to fit our
+ * logic into the time_expr_ns >= time_limit_ns comparison, hence just let it
+ * be. In case of event-stream, the loop is woken up at microsecond
+ * granularity.
+ *
+ * [0]: https://lore.kernel.org/lkml/20250203214911.898276-1-ankur.a.arora@oracle.com
+ */
+
+#ifndef smp_cond_load_acquire_timewait
+
+#define smp_cond_time_check_count 200
+
+#define __smp_cond_load_relaxed_spinwait(ptr, cond_expr, time_expr_ns, \
+ time_limit_ns) ({ \
+ typeof(ptr) __PTR = (ptr); \
+ __unqual_scalar_typeof(*ptr) VAL; \
+ unsigned int __count = 0; \
+ for (;;) { \
+ VAL = READ_ONCE(*__PTR); \
+ if (cond_expr) \
+ break; \
+ cpu_relax(); \
+ if (__count++ < smp_cond_time_check_count) \
+ continue; \
+ if ((time_expr_ns) >= (time_limit_ns)) \
+ break; \
+ __count = 0; \
+ } \
+ (typeof(*ptr))VAL; \
+})
+
+#define __smp_cond_load_acquire_timewait(ptr, cond_expr, \
+ time_expr_ns, time_limit_ns) \
+({ \
+ typeof(ptr) __PTR = (ptr); \
+ __unqual_scalar_typeof(*ptr) VAL; \
+ for (;;) { \
+ VAL = smp_load_acquire(__PTR); \
+ if (cond_expr) \
+ break; \
+ __cmpwait_relaxed(__PTR, VAL); \
+ if ((time_expr_ns) >= (time_limit_ns)) \
+ break; \
+ } \
+ (typeof(*ptr))VAL; \
+})
+
+#define smp_cond_load_acquire_timewait(ptr, cond_expr, \
+ time_expr_ns, time_limit_ns) \
+({ \
+ __unqual_scalar_typeof(*ptr) _val; \
+ int __wfe = arch_timer_evtstrm_available(); \
+ \
+ if (likely(__wfe)) { \
+ _val = __smp_cond_load_acquire_timewait(ptr, cond_expr, \
+ time_expr_ns, \
+ time_limit_ns); \
+ } else { \
+ _val = __smp_cond_load_relaxed_spinwait(ptr, cond_expr, \
+ time_expr_ns, \
+ time_limit_ns); \
+ smp_acquire__after_ctrl_dep(); \
+ } \
+ (typeof(*ptr))_val; \
+})
+
+#endif
+
+#define res_smp_cond_load_acquire(v, c) smp_cond_load_acquire_timewait(v, c, 0, 1)
+
+#include <asm-generic/rqspinlock.h>
+
+#endif /* _ASM_RQSPINLOCK_H */
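[Editor's note] For context, a hedged sketch of how the timewait primitive is meant to be used: poll a word until it reads zero or a nanosecond deadline passes. The helper name and the choice of ktime_get_mono_fast_ns() as the clock are assumptions for illustration, not taken from this patch.

#include <linux/ktime.h>

/* Illustration only: acquire-poll *lock until it reads zero or the deadline expires. */
static inline bool example_wait_for_unlock(int *lock, u64 deadline_ns)
{
	/* VAL is provided by the macro, as with smp_cond_load_acquire(). */
	int val = smp_cond_load_acquire_timewait(lock, VAL == 0,
						 ktime_get_mono_fast_ns(),
						 deadline_ns);

	return val == 0;
}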
diff --git a/arch/arm64/include/asm/rsi.h b/arch/arm64/include/asm/rsi.h
index 188cbb9b23f5..88b50d660e85 100644
--- a/arch/arm64/include/asm/rsi.h
+++ b/arch/arm64/include/asm/rsi.h
@@ -10,11 +10,13 @@
#include <linux/jump_label.h>
#include <asm/rsi_cmds.h>
+#define RSI_PDEV_NAME "arm-cca-dev"
+
DECLARE_STATIC_KEY_FALSE(rsi_present);
void __init arm64_rsi_init(void);
-bool __arm64_is_protected_mmio(phys_addr_t base, size_t size);
+bool arm64_rsi_is_protected(phys_addr_t base, size_t size);
static inline bool is_realm_world(void)
{
diff --git a/arch/arm64/include/asm/rsi_cmds.h b/arch/arm64/include/asm/rsi_cmds.h
index e6a211001bd3..2c8763876dfb 100644
--- a/arch/arm64/include/asm/rsi_cmds.h
+++ b/arch/arm64/include/asm/rsi_cmds.h
@@ -7,6 +7,8 @@
#define __ASM_RSI_CMDS_H
#include <linux/arm-smccc.h>
+#include <linux/string.h>
+#include <asm/memory.h>
#include <asm/rsi_smc.h>
diff --git a/arch/arm64/include/asm/rsi_smc.h b/arch/arm64/include/asm/rsi_smc.h
index 6cb070eca9e9..e19253f96c94 100644
--- a/arch/arm64/include/asm/rsi_smc.h
+++ b/arch/arm64/include/asm/rsi_smc.h
@@ -122,7 +122,7 @@
*/
#define SMC_RSI_ATTESTATION_TOKEN_CONTINUE SMC_RSI_FID(0x195)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct realm_config {
union {
@@ -142,7 +142,7 @@ struct realm_config {
*/
} __aligned(0x1000);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
/*
* Read configuration for the current Realm.
diff --git a/arch/arm64/include/asm/runtime-const.h b/arch/arm64/include/asm/runtime-const.h
index be5915669d23..c3dbd3ae68f6 100644
--- a/arch/arm64/include/asm/runtime-const.h
+++ b/arch/arm64/include/asm/runtime-const.h
@@ -2,6 +2,10 @@
#ifndef _ASM_RUNTIME_CONST_H
#define _ASM_RUNTIME_CONST_H
+#ifdef MODULE
+ #error "Cannot use runtime-const infrastructure from modules"
+#endif
+
#include <asm/cacheflush.h>
/* Sigh. You can still run arm64 in BE mode */
diff --git a/arch/arm64/include/asm/rwonce.h b/arch/arm64/include/asm/rwonce.h
index 56f7b1d4d54b..0f3a01d30f66 100644
--- a/arch/arm64/include/asm/rwonce.h
+++ b/arch/arm64/include/asm/rwonce.h
@@ -5,23 +5,30 @@
#ifndef __ASM_RWONCE_H
#define __ASM_RWONCE_H
-#if defined(CONFIG_LTO) && !defined(__ASSEMBLY__)
+#if defined(CONFIG_LTO) && !defined(__ASSEMBLER__)
#include <linux/compiler_types.h>
#include <asm/alternative-macros.h>
#ifndef BUILD_VDSO
-#ifdef CONFIG_AS_HAS_LDAPR
#define __LOAD_RCPC(sfx, regs...) \
ALTERNATIVE( \
"ldar" #sfx "\t" #regs, \
".arch_extension rcpc\n" \
"ldapr" #sfx "\t" #regs, \
ARM64_HAS_LDAPR)
-#else
-#define __LOAD_RCPC(sfx, regs...) "ldar" #sfx "\t" #regs
-#endif /* CONFIG_AS_HAS_LDAPR */
+
+/*
+ * Replace this with typeof_unqual() when minimum compiler versions are
+ * increased to GCC 14 and Clang 19. For the time being, we need this
+ * workaround, which relies on function return values dropping qualifiers.
+ */
+#define __rwonce_typeof_unqual(x) typeof(({ \
+ __diag_push() \
+ __diag_ignore_all("-Wignored-qualifiers", "") \
+ ((typeof(x)(*)(void))0)(); \
+ __diag_pop() }))
/*
* When building with LTO, there is an increased risk of the compiler
@@ -35,9 +42,12 @@
*/
#define __READ_ONCE(x) \
({ \
- typeof(&(x)) __x = &(x); \
- int atomic = 1; \
- union { __unqual_scalar_typeof(*__x) __val; char __c[1]; } __u; \
+ auto __x = &(x); \
+ auto __ret = (__rwonce_typeof_unqual(*__x) *)__x; \
+ /* Hides alias reassignment from Clang's -Wthread-safety. */ \
+ auto __retp = &__ret; \
+ union { typeof(*__ret) __val; char __c[1]; } __u; \
+ *__retp = &__u.__val; \
switch (sizeof(x)) { \
case 1: \
asm volatile(__LOAD_RCPC(b, %w0, %1) \
@@ -60,13 +70,13 @@
: "Q" (*__x) : "memory"); \
break; \
default: \
- atomic = 0; \
+ __u.__val = *(volatile typeof(*__x) *)__x; \
} \
- atomic ? (typeof(*__x))__u.__val : (*(volatile typeof(__x))__x);\
+ *__ret; \
})
#endif /* !BUILD_VDSO */
-#endif /* CONFIG_LTO && !__ASSEMBLY__ */
+#endif /* CONFIG_LTO && !__ASSEMBLER__ */
#include <asm-generic/rwonce.h>
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index a76f9b387a26..a15a2968e7b6 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -2,7 +2,7 @@
#ifndef _ASM_SCS_H
#define _ASM_SCS_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#include <asm/asm-offsets.h>
#include <asm/sysreg.h>
@@ -10,6 +10,11 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
+ .macro scs_load_current_base
+ get_current_task scs_sp
+ ldr scs_sp, [scs_sp, #TSK_TI_SCS_BASE]
+ .endm
+
.macro scs_load_current
get_current_task scs_sp
ldr scs_sp, [scs_sp, #TSK_TI_SCS_SP]
@@ -19,6 +24,9 @@
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
+ .macro scs_load_current_base
+ .endm
+
.macro scs_load_current
.endm
@@ -53,8 +61,8 @@ enum {
EDYNSCS_INVALID_CFA_OPCODE = 4,
};
-int __pi_scs_patch(const u8 eh_frame[], int size);
+int __pi_scs_patch(const u8 eh_frame[], int size, bool skip_dry_run);
-#endif /* __ASSEMBLY __ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_SCS_H */
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 484cb6972e99..b2248bd3cb58 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -9,7 +9,7 @@
#define SDEI_STACK_SIZE IRQ_STACK_SIZE
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/linkage.h>
#include <linux/preempt.h>
@@ -49,5 +49,5 @@ unsigned long do_sdei_event(struct pt_regs *regs,
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_SDEI_H */
diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h
index b83975555314..bf6bf40bc5ab 100644
--- a/arch/arm64/include/asm/seccomp.h
+++ b/arch/arm64/include/asm/seccomp.h
@@ -23,7 +23,6 @@
#define SECCOMP_ARCH_NATIVE_NR NR_syscalls
#define SECCOMP_ARCH_NATIVE_NAME "aarch64"
#ifdef CONFIG_COMPAT
-#include <asm/unistd_compat_32.h>
# define SECCOMP_ARCH_COMPAT AUDIT_ARCH_ARM
# define SECCOMP_ARCH_COMPAT_NR __NR_compat32_syscalls
# define SECCOMP_ARCH_COMPAT_NAME "arm"
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 40971ac1303f..51b0d594239e 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,6 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[];
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
+extern char __hyp_data_start[], __hyp_data_end[];
extern char __hyp_rodata_start[], __hyp_rodata_end[];
extern char __hyp_reloc_begin[], __hyp_reloc_end[];
extern char __hyp_bss_start[], __hyp_bss_end[];
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
index ba269a7a3201..3d96dde4d214 100644
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -21,7 +21,7 @@ static inline bool arch_parse_debug_rodata(char *arg)
if (!arg)
return false;
- if (!strcmp(arg, "full")) {
+ if (!strcmp(arg, "on")) {
rodata_enabled = rodata_full = true;
return true;
}
@@ -31,7 +31,7 @@ static inline bool arch_parse_debug_rodata(char *arg)
return true;
}
- if (!strcmp(arg, "on")) {
+ if (!strcmp(arg, "noalias")) {
rodata_enabled = true;
rodata_full = false;
return true;
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index 8e86c9e70e48..69ecbd69ca8c 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -6,12 +6,15 @@
#ifndef __ASM_SIMD_H
#define __ASM_SIMD_H
+#include <linux/cleanup.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>
+#include <asm/neon.h>
+
#ifdef CONFIG_KERNEL_MODE_NEON
/*
@@ -29,7 +32,7 @@ static __must_check inline bool may_use_simd(void)
*/
return !WARN_ON(!system_capabilities_finalized()) &&
system_supports_fpsimd() &&
- !in_hardirq() && !irqs_disabled() && !in_nmi();
+ !in_hardirq() && !in_nmi();
}
#else /* ! CONFIG_KERNEL_MODE_NEON */
@@ -40,4 +43,18 @@ static __must_check inline bool may_use_simd(void) {
#endif /* ! CONFIG_KERNEL_MODE_NEON */
+DEFINE_LOCK_GUARD_1(ksimd,
+ struct user_fpsimd_state,
+ kernel_neon_begin(_T->lock),
+ kernel_neon_end(_T->lock))
+
+#define __scoped_ksimd(_label) \
+ for (struct user_fpsimd_state __uninitialized __st; \
+ true; ({ goto _label; })) \
+ if (0) { \
+_label: break; \
+ } else scoped_guard(ksimd, &__st)
+
+#define scoped_ksimd() __scoped_ksimd(__UNIQUE_ID(label))
+
#endif
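[Editor's note] The new ksimd guard allocates the FPSIMD save buffer on the caller's stack and brackets the region with kernel_neon_begin()/kernel_neon_end(); a usage sketch follows (the caller is hypothetical, not part of the patch).

/* Illustration only: enter a kernel-mode NEON region via the scoped guard. */
static void example_neon_section(void)
{
	if (!may_use_simd())
		return;

	scoped_ksimd() {
		/* FPSIMD/NEON instructions may be used within this scope. */
	}
}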
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2510eec026f7..10ea4f543069 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -23,7 +23,7 @@
#define CPU_STUCK_REASON_52_BIT_VA (UL(1) << CPU_STUCK_REASON_SHIFT)
#define CPU_STUCK_REASON_NO_GRAN (UL(2) << CPU_STUCK_REASON_SHIFT)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/threads.h>
#include <linux/cpumask.h>
@@ -50,10 +50,32 @@ struct seq_file;
*/
extern void smp_init_cpus(void);
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_CPU_STOP_NMI,
+ IPI_TIMER,
+ IPI_IRQ_WORK,
+ NR_IPI,
+ /*
+	 * Any enum >= NR_IPI and < MAX_IPI is special and not traceable
+ * with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
+ IPI_KGDB_ROUNDUP,
+ MAX_IPI
+};
+
/*
* Register IPI interrupts with the arch SMP code
*/
-extern void set_smp_ipi_range(int ipi_base, int nr_ipi);
+extern void set_smp_ipi_range_percpu(int ipi_base, int nr_ipi, int ncpus);
+
+static inline void set_smp_ipi_range(int ipi_base, int n)
+{
+ set_smp_ipi_range_percpu(ipi_base, n, 0);
+}
/*
* Called from the secondary holding pen, this is the secondary CPU entry point.
@@ -133,6 +155,6 @@ bool cpus_are_stuck_in_kernel(void);
extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
-#endif /* ifndef __ASSEMBLY__ */
+#endif /* ifndef __ASSEMBLER__ */
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index 8a8acc220371..84783efdc9d1 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -5,7 +5,10 @@
#ifndef __ASM_SPARSEMEM_H
#define __ASM_SPARSEMEM_H
-#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
+#include <asm/pgtable-prot.h>
+
+#define MAX_PHYSMEM_BITS PHYS_MASK_SHIFT
+#define MAX_POSSIBLE_PHYSMEM_BITS (52)
/*
* Section size must be at least 512MB for 64K base
diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
index 0c4d9045c31f..296ae3420bfd 100644
--- a/arch/arm64/include/asm/spectre.h
+++ b/arch/arm64/include/asm/spectre.h
@@ -12,7 +12,7 @@
#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/smp.h>
#include <asm/percpu.h>
@@ -97,7 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
-u8 spectre_bhb_loop_affected(int scope);
+extern bool __nospectre_bhb;
+u8 get_spectre_bhb_loop_value(void);
+bool is_spectre_bhb_fw_mitigated(void);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
@@ -115,6 +117,7 @@ void spectre_bhb_patch_wa3(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void spectre_bhb_patch_clearbhb(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
+void spectre_print_disabled_mitigations(void);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_SPECTRE_H */
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index 66ec8caa6ac0..6d3280932bf5 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -59,7 +59,6 @@ static inline bool on_task_stack(const struct task_struct *tsk,
#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1))
-#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
static inline struct stack_info stackinfo_get_overflow(void)
@@ -72,11 +71,8 @@ static inline struct stack_info stackinfo_get_overflow(void)
.high = high,
};
}
-#else
-#define stackinfo_get_overflow() stackinfo_get_unknown()
-#endif
-#if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
+#if defined(CONFIG_ARM_SDE_INTERFACE)
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
diff --git a/arch/arm64/include/asm/stacktrace/frame.h b/arch/arm64/include/asm/stacktrace/frame.h
index 0ee0f6ba0fd8..796797b8db7e 100644
--- a/arch/arm64/include/asm/stacktrace/frame.h
+++ b/arch/arm64/include/asm/stacktrace/frame.h
@@ -25,7 +25,7 @@
#define FRAME_META_TYPE_FINAL 1
#define FRAME_META_TYPE_PT_REGS 2
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* A standard AAPCS64 frame record.
*/
@@ -43,6 +43,6 @@ struct frame_record_meta {
struct frame_record record;
u64 type;
};
-#endif /* __ASSEMBLY */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_STACKTRACE_FRAME_H */
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 44759281d0d4..171f9edef49f 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
diff --git a/arch/arm64/include/asm/static_call.h b/arch/arm64/include/asm/static_call.h
new file mode 100644
index 000000000000..30f6a046de4f
--- /dev/null
+++ b/arch/arm64/include/asm/static_call.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target) \
+ asm(" .pushsection .static_call.text, \"ax\" \n" \
+ " .align 4 \n" \
+ " .globl " name " \n" \
+ name ": \n" \
+ " hint 34 /* BTI C */ \n" \
+ " adrp x16, 1f \n" \
+ " ldr x16, [x16, :lo12:1f] \n" \
+ " br x16 \n" \
+ " .type " name ", %function \n" \
+ " .size " name ", . - " name " \n" \
+ " .popsection \n" \
+ " .pushsection .rodata, \"a\" \n" \
+ " .align 3 \n" \
+ "1: .quad " target " \n" \
+ " .popsection \n")
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(STATIC_CALL_TRAMP_STR(name), #func)
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+
+#endif /* _ASM_STATIC_CALL_H */
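[Editor's note] The trampoline above loads its target from an adjacent literal; how the core static_call code updates that target is outside this hunk. At the call-site level it is consumed as in this hedged sketch (names are illustrative, not from this patch).

#include <linux/static_call.h>

/* Illustration only: a static call with a default implementation. */
static int default_filter(int x)
{
	return x;
}
DEFINE_STATIC_CALL(my_filter, default_filter);

static int apply_filter(int x)
{
	/* Branches via the arch trampoline; static_call_update() can retarget it. */
	return static_call(my_filter)(x);
}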
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 0cde2f473971..e9ce68d50ba4 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 13
+#define NR_CTX_REGS 14
#define NR_CALLEE_SAVED_REGS 12
/*
@@ -23,7 +23,7 @@ struct cpu_suspend_ctx {
* __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
* This data must survive until cpu_resume() is called.
*
- * This struct desribes the size and the layout of the saved cpu state.
+ * This struct describes the size and the layout of the saved cpu state.
* The layout of the callee_saved_regs is defined by the implementation
* of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
* in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index ab8e14b96f68..5e4c7fc44f73 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -61,16 +61,51 @@ static inline void syscall_set_return_value(struct task_struct *task,
regs->regs[0] = val;
}
-#define SYSCALL_MAX_ARGS 6
+static inline void syscall_set_nr(struct task_struct *task,
+ struct pt_regs *regs,
+ int nr)
+{
+ regs->syscallno = nr;
+ if (nr == -1) {
+ /*
+ * When the syscall number is set to -1, the syscall will be
+ * skipped. In this case the syscall return value has to be
+ * set explicitly, otherwise the first syscall argument is
+ * returned as the syscall return value.
+ */
+ syscall_set_return_value(task, regs, -ENOSYS, 0);
+ }
+}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
{
args[0] = regs->orig_x0;
- args++;
+ args[1] = regs->regs[1];
+ args[2] = regs->regs[2];
+ args[3] = regs->regs[3];
+ args[4] = regs->regs[4];
+ args[5] = regs->regs[5];
+}
- memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->regs[0] = args[0];
+ regs->regs[1] = args[1];
+ regs->regs[2] = args[2];
+ regs->regs[3] = args[3];
+ regs->regs[4] = args[4];
+ regs->regs[5] = args[5];
+
+ /*
+ * Also copy the first argument into orig_x0
+	 * so that syscall_get_arguments() will return it
+ * instead of the previous value.
+ */
+ regs->orig_x0 = regs->regs[0];
}
/*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b8303a83c0bf..736561480f36 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -12,6 +12,7 @@
#include <linux/bits.h>
#include <linux/stringify.h>
#include <linux/kasan-tags.h>
+#include <linux/kconfig.h>
#include <asm/gpr-num.h>
@@ -51,7 +52,7 @@
#ifndef CONFIG_BROKEN_GAS_INST
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
// The space separator is omitted so that __emit_inst(x) can be parsed as
// either an assembler directive or an assembler macro argument.
#define __emit_inst(x) .inst(x)
@@ -70,11 +71,11 @@
(((x) >> 24) & 0x000000ff))
#endif /* CONFIG_CPU_BIG_ENDIAN */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
#define __emit_inst(x) .long __INSTR_BSWAP(x)
-#else /* __ASSEMBLY__ */
+#else /* __ASSEMBLER__ */
#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* CONFIG_BROKEN_GAS_INST */
@@ -90,7 +91,8 @@
*/
#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
#define PSTATE_Imm_shift CRm_shift
-#define SET_PSTATE(x, r) __emit_inst(0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
+#define ENCODE_PSTATE(x, r) (0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE(x, r) __emit_inst(ENCODE_PSTATE(x, r))
#define PSTATE_PAN pstate_field(0, 4)
#define PSTATE_UAO pstate_field(0, 3)
@@ -112,11 +114,16 @@
/* Register-based PAN access, for save/restore purposes */
#define SYS_PSTATE_PAN sys_reg(3, 0, 4, 2, 3)
-#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
- __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+#define __SYS_BARRIER_INSN(op0, op1, CRn, CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | \
+ sys_insn((op0), (op1), (CRn), (CRm), (op2)) | \
+ ((Rt) & 0x1f))
-#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 3, 3, 0, 7, 31)
+#define GSB_SYS_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 0, 31)
+#define GSB_ACK_BARRIER_INSN __SYS_BARRIER_INSN(1, 0, 12, 0, 1, 31)
+/* Data cache zero operations */
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4)
#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6)
@@ -153,11 +160,13 @@
#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
-/* Data cache zero operations */
#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
+#define SYS_DC_CIVAPS sys_insn(1, 0, 7, 15, 1)
+#define SYS_DC_CIGDVAPS sys_insn(1, 0, 7, 15, 5)
+
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
@@ -198,16 +207,8 @@
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
-#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0)
#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
-#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1)
#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
-#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2)
-#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2)
-
-#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0)
-#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1)
-#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0)
#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
@@ -273,8 +274,6 @@
/* ETM */
#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
-#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0)
-
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -283,10 +282,6 @@
#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
-#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
-
-#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
-
#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1)
#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2)
@@ -348,15 +343,6 @@
#define SYS_PAR_EL1_ATTR GENMASK_ULL(63, 56)
#define SYS_PAR_EL1_F0_RES0 (GENMASK_ULL(6, 1) | GENMASK_ULL(55, 52))
-/*** Statistical Profiling Extension ***/
-#define PMSEVFR_EL1_RES0_IMP \
- (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
- BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
-#define PMSEVFR_EL1_RES0_V1P1 \
- (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
-#define PMSEVFR_EL1_RES0_V1P2 \
- (PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6))
-
/* Buffer error reporting */
#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT
#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK
@@ -477,6 +463,7 @@
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1)
+#define SYS_CNTVCT_EL0 sys_reg(3, 3, 14, 0, 2)
#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5)
#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6)
@@ -484,27 +471,39 @@
#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+#define SYS_CNTV_TVAL_EL0 sys_reg(3, 3, 14, 3, 0)
#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0)
+#define SYS_AARCH32_CNTVCT sys_reg(0, 1, 0, 14, 0)
#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0)
+#define SYS_AARCH32_CNTVCTSS sys_reg(0, 9, 0, 14, 0)
#define __PMEV_op2(n) ((n) & 0x7)
#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTSVRn_EL1(n) sys_reg(2, 0, 14, __CNTR_CRm(n), __PMEV_op2(n))
#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
+#define SYS_SPMCGCRn_EL1(n) sys_reg(2, 0, 9, 13, ((n) & 1))
+
+#define __SPMEV_op2(n) ((n) & 0x7)
+#define __SPMEV_crm(p, n) ((((p) & 7) << 1) | (((n) >> 3) & 1))
+#define SYS_SPMEVCNTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b000, n), __SPMEV_op2(n))
+#define SYS_SPMEVFILT2Rn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b011, n), __SPMEV_op2(n))
+#define SYS_SPMEVFILTRn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b010, n), __SPMEV_op2(n))
+#define SYS_SPMEVTYPERn_EL0(n) sys_reg(2, 3, 14, __SPMEV_crm(0b001, n), __SPMEV_op2(n))
+
#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0)
#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5)
-#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1)
#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3)
#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0)
@@ -517,10 +516,7 @@
#define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1)
#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2)
#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0)
-#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
-#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -561,12 +557,8 @@
#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
-#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
@@ -610,28 +602,18 @@
/* VHE encodings for architectural EL0/1 system registers */
#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0)
-#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
-#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
-#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3)
-#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
-#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1)
-#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
-#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
-#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3)
#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
-#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0)
#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
-#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7)
#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
@@ -820,6 +802,12 @@
#define OP_COSP_RCTX sys_insn(1, 3, 7, 3, 6)
#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
+/*
+ * BRBE Instructions
+ */
+#define BRB_IALL_INSN __emit_inst(0xd5000000 | OP_BRB_IALL | (0x1f))
+#define BRB_INJ_INSN __emit_inst(0xd5000000 | OP_BRB_INJ | (0x1f))
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
@@ -847,12 +835,6 @@
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
-/* SCTLR_EL2 specific flags. */
-#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
- (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
- (BIT(29)))
-
-#define SCTLR_EL2_BT (BIT(36))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
#else
@@ -983,20 +965,7 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (BIT(31))
-#define TRFCR_ELx_TS_SHIFT 5
-#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
-#define TRFCR_EL2_CX BIT(3)
-#define TRFCR_ELx_ExTRE BIT(1)
-#define TRFCR_ELx_E0TRE BIT(0)
-
/* GIC Hypervisor interface registers */
-/* ICH_MISR_EL2 bit definitions */
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
/* ICH_LR*_EL2 bit definitions */
#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
@@ -1011,49 +980,6 @@
#define ICH_LR_PRIORITY_SHIFT 48
#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-/* ICH_HCR_EL2 bit definitions */
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_NPIE (1 << 3)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_TDIR (1 << 14)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
-/* ICH_VMCR_EL2 bit definitions */
-#define ICH_VMCR_ACK_CTL_SHIFT 2
-#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
-#define ICH_VMCR_FIQ_EN_SHIFT 3
-#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
-#define ICH_VMCR_CBPR_SHIFT 4
-#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
-#define ICH_VMCR_EOIM_SHIFT 9
-#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
-#define ICH_VMCR_BPR1_SHIFT 18
-#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
-#define ICH_VMCR_BPR0_SHIFT 21
-#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
-#define ICH_VMCR_PMR_SHIFT 24
-#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
-#define ICH_VMCR_ENG0_SHIFT 0
-#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
-#define ICH_VMCR_ENG1_SHIFT 1
-#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-
-/* ICH_VTR_EL2 bit definitions */
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-#define ICH_VTR_TDS_SHIFT 19
-#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
-
/*
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
@@ -1070,8 +996,11 @@
#define PIE_RX UL(0xa)
#define PIE_RW UL(0xc)
#define PIE_RWX UL(0xe)
+#define PIE_MASK UL(0xf)
-#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
+#define PIRx_ELx_BITS_PER_IDX 4
+#define PIRx_ELx_PERM_SHIFT(idx) ((idx) * PIRx_ELx_BITS_PER_IDX)
+#define PIRx_ELx_PERM_PREP(idx, perm) (((perm) & PIE_MASK) << PIRx_ELx_PERM_SHIFT(idx))
/*
* Permission Overlay Extension (POE) permission encodings.
@@ -1082,12 +1011,14 @@
#define POE_RX UL(0x3)
#define POE_W UL(0x4)
#define POE_RW UL(0x5)
-#define POE_XW UL(0x6)
-#define POE_RXW UL(0x7)
+#define POE_WX UL(0x6)
+#define POE_RWX UL(0x7)
#define POE_MASK UL(0xf)
-/* Initial value for Permission Overlay Extension for EL0 */
-#define POR_EL0_INIT POE_RXW
+#define POR_ELx_BITS_PER_IDX 4
+#define POR_ELx_PERM_SHIFT(idx) ((idx) * POR_ELx_BITS_PER_IDX)
+#define POR_ELx_PERM_GET(idx, reg) (((reg) >> POR_ELx_PERM_SHIFT(idx)) & POE_MASK)
+#define POR_ELx_PERM_PREP(idx, perm) (((perm) & POE_MASK) << POR_ELx_PERM_SHIFT(idx))
/*
* Definitions for Guarded Control Stack
@@ -1108,13 +1039,76 @@
#define GCS_CAP(x) ((((unsigned long)x) & GCS_CAP_ADDR_MASK) | \
GCS_CAP_VALID_TOKEN)
-
-#define ARM64_FEATURE_FIELD_BITS 4
-
-/* Defined for compatibility only, do not add new users. */
-#define ARM64_FEATURE_MASK(x) (x##_MASK)
-
-#ifdef __ASSEMBLY__
+/*
+ * Definitions for GICv5 instructions
+ */
+#define GICV5_OP_GIC_CDAFF sys_insn(1, 0, 12, 1, 3)
+#define GICV5_OP_GIC_CDDI sys_insn(1, 0, 12, 2, 0)
+#define GICV5_OP_GIC_CDDIS sys_insn(1, 0, 12, 1, 0)
+#define GICV5_OP_GIC_CDHM sys_insn(1, 0, 12, 2, 1)
+#define GICV5_OP_GIC_CDEN sys_insn(1, 0, 12, 1, 1)
+#define GICV5_OP_GIC_CDEOI sys_insn(1, 0, 12, 1, 7)
+#define GICV5_OP_GIC_CDPEND sys_insn(1, 0, 12, 1, 4)
+#define GICV5_OP_GIC_CDPRI sys_insn(1, 0, 12, 1, 2)
+#define GICV5_OP_GIC_CDRCFG sys_insn(1, 0, 12, 1, 5)
+#define GICV5_OP_GICR_CDIA sys_insn(1, 0, 12, 3, 0)
+#define GICV5_OP_GICR_CDNMIA sys_insn(1, 0, 12, 3, 1)
+
+/* Definitions for GIC CDAFF */
+#define GICV5_GIC_CDAFF_IAFFID_MASK GENMASK_ULL(47, 32)
+#define GICV5_GIC_CDAFF_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDAFF_IRM_MASK BIT_ULL(28)
+#define GICV5_GIC_CDAFF_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDI */
+#define GICV5_GIC_CDDI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDDIS */
+#define GICV5_GIC_CDDIS_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDDIS_TYPE(r) FIELD_GET(GICV5_GIC_CDDIS_TYPE_MASK, r)
+#define GICV5_GIC_CDDIS_ID_MASK GENMASK_ULL(23, 0)
+#define GICV5_GIC_CDDIS_ID(r) FIELD_GET(GICV5_GIC_CDDIS_ID_MASK, r)
+
+/* Definitions for GIC CDEN */
+#define GICV5_GIC_CDEN_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDEN_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDHM */
+#define GICV5_GIC_CDHM_HM_MASK BIT_ULL(32)
+#define GICV5_GIC_CDHM_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDHM_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPEND */
+#define GICV5_GIC_CDPEND_PENDING_MASK BIT_ULL(32)
+#define GICV5_GIC_CDPEND_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPEND_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDPRI */
+#define GICV5_GIC_CDPRI_PRIORITY_MASK GENMASK_ULL(39, 35)
+#define GICV5_GIC_CDPRI_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDPRI_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GIC CDRCFG */
+#define GICV5_GIC_CDRCFG_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDRCFG_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GICR CDIA */
+#define GICV5_GIC_CDIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDIA_VALID(r) FIELD_GET(GICV5_GIC_CDIA_VALID_MASK, r)
+#define GICV5_GIC_CDIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GIC_CDIA_ID_MASK GENMASK_ULL(23, 0)
+
+/* Definitions for GICR CDNMIA */
+#define GICV5_GICR_CDNMIA_VALID_MASK BIT_ULL(32)
+#define GICV5_GICR_CDNMIA_VALID(r) FIELD_GET(GICV5_GICR_CDNMIA_VALID_MASK, r)
+#define GICV5_GICR_CDNMIA_TYPE_MASK GENMASK_ULL(31, 29)
+#define GICV5_GICR_CDNMIA_ID_MASK GENMASK_ULL(23, 0)
+
+#define gicr_insn(insn) read_sysreg_s(GICV5_OP_GICR_##insn)
+#define gic_insn(v, insn) write_sysreg_s(v, GICV5_OP_GIC_##insn)
+
+#ifdef __ASSEMBLER__
.macro mrs_s, rt, sreg
__emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt))
@@ -1124,6 +1118,13 @@
__emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt))
.endm
+ .macro msr_hcr_el2, reg
+#if IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23)
+ dsb nsh
+#endif
+ msr hcr_el2, \reg
+ isb // Required by AMPERE_ERRATUM_AC04_CPU_23
+ .endm
#else
#include <linux/bitfield.h>
@@ -1194,10 +1195,19 @@
__val; \
})
+/*
+ * The "Z" constraint combined with the "%x0" template should be enough
+ * to force XZR generation if (v) is a constant 0 value but LLVM does not
+ * yet understand that modifier/constraint combo so a conditional is required
+ * to nudge the compiler into using XZR as a source for a 0 constant value.
+ */
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
u32 __maybe_unused __check_r = (u32)(r); \
- asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
+ if (__builtin_constant_p(__val) && __val == 0) \
+ asm volatile(__msr_s(r, "xzr")); \
+ else \
+ asm volatile(__msr_s(r, "%x0") : : "r" (__val)); \
} while (0)
/*
@@ -1211,6 +1221,13 @@
write_sysreg(__scs_new, sysreg); \
} while (0)
+#define sysreg_clear_set_hcr(clear, set) do { \
+ u64 __scs_val = read_sysreg(hcr_el2); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_hcr(__scs_new); \
+} while (0)
+
#define sysreg_clear_set_s(sysreg, clear, set) do { \
u64 __scs_val = read_sysreg_s(sysreg); \
u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
@@ -1218,6 +1235,17 @@
write_sysreg_s(__scs_new, sysreg); \
} while (0)
+#define write_sysreg_hcr(__val) do { \
+ if (IS_ENABLED(CONFIG_AMPERE_ERRATUM_AC04_CPU_23) && \
+ (!system_capabilities_finalized() || \
+ alternative_has_cap_unlikely(ARM64_WORKAROUND_AMPERE_AC04_CPU_23))) \
+ asm volatile("dsb nsh; msr hcr_el2, %x0; isb" \
+ : : "rZ" (__val)); \
+ else \
+ asm volatile("msr hcr_el2, %x0" \
+ : : "rZ" (__val)); \
+} while (0)
+
#define read_sysreg_par() ({ \
u64 par; \
asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index c34344256762..d316a804eb38 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -7,7 +7,7 @@
#ifndef __ASM_SYSTEM_MISC_H
#define __ASM_SYSTEM_MISC_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/compiler.h>
#include <linux/linkage.h>
@@ -25,13 +25,9 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
int signo, int sicode, unsigned long far,
unsigned long err);
-void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned long,
- struct pt_regs *),
- int sig, int code, const char *name);
-
struct mm_struct;
extern void __show_regs(struct pt_regs *);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1114c1c3300a..5d7fe3e153c8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -10,7 +10,7 @@
#include <linux/compiler.h>
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
struct task_struct;
@@ -42,6 +42,9 @@ struct thread_info {
void *scs_base;
void *scs_sp;
#endif
+#ifdef CONFIG_ARM64_MPAM
+ u64 mpam_partid_pmg;
+#endif
u32 cpu;
};
@@ -59,16 +62,18 @@ void arch_setup_new_exec(void);
#define TIF_SIGPENDING 0 /* signal pending */
#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
-#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
-#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
-#define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
-#define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
+#define TIF_NEED_RESCHED_LAZY 2 /* Lazy rescheduling needed */
+#define TIF_NOTIFY_RESUME 3 /* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE 4 /* CPU's FP state is not current's */
+#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */
+#define TIF_MTE_ASYNC_FAULT 6 /* MTE Asynchronous Tag Check Fault */
+#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
#define TIF_SECCOMP 11 /* syscall secure computing */
#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
+#define TIF_PATCH_PENDING 13 /* pending live patching update */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_FREEZE 19
#define TIF_RESTORE_SIGMASK 20
@@ -82,9 +87,11 @@ void arch_setup_new_exec(void);
#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define TIF_KERNEL_FPSTATE 29 /* Task is in a kernel mode FPSIMD section */
#define TIF_TSC_SIGSEGV 30 /* SIGSEGV on counter-timer access */
+#define TIF_LAZY_MMU_PENDING 31 /* Ops pending for lazy mmu mode exit */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
@@ -92,6 +99,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
+#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_32BIT (1 << TIF_32BIT)
@@ -100,11 +108,6 @@ void arch_setup_new_exec(void);
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_TSC_SIGSEGV (1 << TIF_TSC_SIGSEGV)
-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
- _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
- _TIF_NOTIFY_SIGNAL)
-
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_SYSCALL_EMU)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a947c6e784ed..10869d7731b8 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -9,12 +9,7 @@
#define __ASM_TLB_H
#include <linux/pagemap.h>
-#include <linux/swap.h>
-static inline void __tlb_remove_table(void *_table)
-{
- free_page_and_swap_cache((struct page *)_table);
-}
#define tlb_flush tlb_flush
static void tlb_flush(struct mmu_gather *tlb);
@@ -58,7 +53,7 @@ static inline int tlb_get_level(struct mmu_gather *tlb)
static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
- bool last_level = !tlb->freed_tables;
+ tlbf_t flags = tlb->freed_tables ? TLBF_NONE : TLBF_NOWALKCACHE;
unsigned long stride = tlb_get_unmap_size(tlb);
int tlb_level = tlb_get_level(tlb);
@@ -68,13 +63,13 @@ static inline void tlb_flush(struct mmu_gather *tlb)
* reallocate our ASID without invalidating the entire TLB.
*/
if (tlb->fullmm) {
- if (!last_level)
+ if (tlb->freed_tables)
flush_tlb_mm(tlb->mm);
return;
}
__flush_tlb_range(&vma, tlb->start, tlb->end, stride,
- last_level, tlb_level);
+ tlb_level, flags);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
@@ -82,7 +77,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
{
struct ptdesc *ptdesc = page_ptdesc(pte);
- pagetable_pte_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
@@ -92,7 +86,6 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
{
struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
- pagetable_pmd_dtor(ptdesc);
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
@@ -106,7 +99,19 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
if (!pgtable_l4_enabled())
return;
- pagetable_pud_dtor(ptdesc);
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 4
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4dp,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(p4dp);
+
+ if (!pgtable_l5_enabled())
+ return;
+
tlb_remove_ptdesc(tlb, ptdesc);
}
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index bc94e036a26b..47fa4d39a461 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -8,7 +8,7 @@
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/bitfield.h>
#include <linux/mm_types.h>
@@ -31,19 +31,11 @@
*/
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE \
"tlbi " #op "\n" \
- ALTERNATIVE("nop\n nop", \
- "dsb ish\n tlbi " #op, \
- ARM64_WORKAROUND_REPEAT_TLBI, \
- CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE \
- "tlbi " #op ", %0\n" \
- ALTERNATIVE("nop\n nop", \
- "dsb ish\n tlbi " #op ", %0", \
- ARM64_WORKAROUND_REPEAT_TLBI, \
- CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
- : : "r" (arg))
+ "tlbi " #op ", %x0\n" \
+ : : "rZ" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
@@ -105,24 +97,69 @@ static inline unsigned long get_trans_granule(void)
#define TLBI_TTL_UNKNOWN INT_MAX
-#define __tlbi_level(op, addr, level) do { \
- u64 arg = addr; \
- \
- if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && \
- level >= 0 && level <= 3) { \
- u64 ttl = level & 3; \
- ttl |= get_trans_granule() << 2; \
- arg &= ~TLBI_TTL_MASK; \
- arg |= FIELD_PREP(TLBI_TTL_MASK, ttl); \
- } \
- \
- __tlbi(op, arg); \
-} while(0)
-
-#define __tlbi_user_level(op, arg, level) do { \
- if (arm64_kernel_unmapped_at_el0()) \
- __tlbi_level(op, (arg | USER_ASID_FLAG), level); \
-} while (0)
+typedef void (*tlbi_op)(u64 arg);
+
+static __always_inline void vae1is(u64 arg)
+{
+ __tlbi(vae1is, arg);
+ __tlbi_user(vae1is, arg);
+}
+
+static __always_inline void vae2is(u64 arg)
+{
+ __tlbi(vae2is, arg);
+}
+
+static __always_inline void vale1(u64 arg)
+{
+ __tlbi(vale1, arg);
+ __tlbi_user(vale1, arg);
+}
+
+static __always_inline void vale1is(u64 arg)
+{
+ __tlbi(vale1is, arg);
+ __tlbi_user(vale1is, arg);
+}
+
+static __always_inline void vale2is(u64 arg)
+{
+ __tlbi(vale2is, arg);
+}
+
+static __always_inline void vaale1is(u64 arg)
+{
+ __tlbi(vaale1is, arg);
+}
+
+static __always_inline void ipas2e1(u64 arg)
+{
+ __tlbi(ipas2e1, arg);
+}
+
+static __always_inline void ipas2e1is(u64 arg)
+{
+ __tlbi(ipas2e1is, arg);
+}
+
+static __always_inline void __tlbi_level_asid(tlbi_op op, u64 addr, u32 level,
+ u16 asid)
+{
+ u64 arg = __TLBI_VADDR(addr, asid);
+
+ if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
+ u64 ttl = level | (get_trans_granule() << 2);
+
+ FIELD_MODIFY(TLBI_TTL_MASK, &arg, ttl);
+ }
+
+ op(arg);
+}
+
+static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+{
+ __tlbi_level_asid(op, addr, level, 0);
+}
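
A brief worked sketch of the TTL hint composed by __tlbi_level_asid() above, assuming a 4K granule (get_trans_granule() returning the 4K TG encoding of 1); the helper below is illustrative only:

/*
 * Illustrative helper (not part of the patch): TTL hint packing for a
 * level-3 leaf invalidation with 4K pages, i.e. ttl = 3 | (1 << 2) = 0b0111
 * placed in the TLBI_TTL_MASK field of the VA operand. CPUs without
 * FEAT_TTL leave the field at zero and perform a non-hinted invalidation.
 */
static inline u64 example_ttl_operand(u64 addr, u16 asid)
{
	u64 arg = __TLBI_VADDR(addr, asid);
	u64 ttl = 3 | (1 << 2);			/* level 3, TG = 0b01 (4K) */

	FIELD_MODIFY(TLBI_TTL_MASK, &arg, ttl);
	return arg;
}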
/*
* This macro creates a properly formatted VA operand for the TLB RANGE. The
@@ -149,19 +186,6 @@ static inline unsigned long get_trans_granule(void)
#define TLBIR_TTL_MASK GENMASK_ULL(38, 37)
#define TLBIR_BADDR_MASK GENMASK_ULL(36, 0)
-#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl) \
- ({ \
- unsigned long __ta = 0; \
- unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0; \
- __ta |= FIELD_PREP(TLBIR_BADDR_MASK, baddr); \
- __ta |= FIELD_PREP(TLBIR_TTL_MASK, __ttl); \
- __ta |= FIELD_PREP(TLBIR_NUM_MASK, num); \
- __ta |= FIELD_PREP(TLBIR_SCALE_MASK, scale); \
- __ta |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule()); \
- __ta |= FIELD_PREP(TLBIR_ASID_MASK, asid); \
- __ta; \
- })
-
/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale) \
((unsigned long)((num) + 1) << (5 * (scale) + 1))
@@ -175,11 +199,35 @@ static inline unsigned long get_trans_granule(void)
* range.
*/
#define __TLBI_RANGE_NUM(pages, scale) \
- ({ \
- int __pages = min((pages), \
- __TLBI_RANGE_PAGES(31, (scale))); \
- (__pages >> (5 * (scale) + 1)) - 1; \
- })
+ (((pages) >> (5 * (scale) + 1)) - 1)
+
+#define __repeat_tlbi_sync(op, arg...) \
+do { \
+ if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI)) \
+ break; \
+ __tlbi(op, ##arg); \
+ dsb(ish); \
+} while (0)
+
+/*
+ * Complete broadcast TLB maintenance issued by the host which invalidates
+ * stage 1 information in the host's own translation regime.
+ */
+static inline void __tlbi_sync_s1ish(void)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale1is, 0);
+}
+
+/*
+ * Complete broadcast TLB maintenance issued by hyp code which invalidates
+ * stage 1 translation information in any translation regime.
+ */
+static inline void __tlbi_sync_s1ish_hyp(void)
+{
+ dsb(ish);
+ __repeat_tlbi_sync(vale2is, 0);
+}
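
The two __tlbi_sync_* helpers above centralise the ARM64_WORKAROUND_REPEAT_TLBI handling that was previously folded into every __TLBI_*() expansion: affected cores issue a second (dummy) invalidation and barrier after the normal completion DSB. A rough sketch of the resulting sequences, assumed rather than taken from disassembly:

/*
 * Assumed instruction sequences produced via __tlbi_sync_s1ish():
 *
 *   unaffected CPU:            REPEAT_TLBI-affected CPU:
 *     tlbi  <op>                 tlbi  <op>
 *     dsb   ish                  dsb   ish
 *                                tlbi  vale1is, xzr
 *                                dsb   ish
 */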
/*
* TLB Invalidation
@@ -221,10 +269,7 @@ static inline unsigned long get_trans_granule(void)
* unmapping pages from vmalloc/io space.
*
* flush_tlb_page(vma, addr)
- * Invalidate a single user mapping for address 'addr' in the
- * address space corresponding to 'vma->mm'. Note that this
- * operation only invalidates a single, last-level page-table
- * entry and therefore does not affect any walk-caches.
+ * Equivalent to __flush_tlb_page(..., flags=TLBF_NONE)
*
*
* Next, we have some undocumented invalidation routines that you probably
@@ -238,17 +283,28 @@ static inline unsigned long get_trans_granule(void)
* CPUs, ensuring that any walk-cache entries associated with the
* translation are also invalidated.
*
- * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
+ * __flush_tlb_range(vma, start, end, stride, tlb_level, flags)
* Invalidate the virtual-address range '[start, end)' on all
* CPUs for the user address space corresponding to 'vma->mm'.
* The invalidation operations are issued at a granularity
- * determined by 'stride' and only affect any walk-cache entries
- * if 'last_level' is equal to false. tlb_level is the level at
+ * determined by 'stride'. tlb_level is the level at
* which the invalidation must take place. If the level is wrong,
* no invalidation may take place. In the case where the level
* cannot be easily determined, the value TLBI_TTL_UNKNOWN will
- * perform a non-hinted invalidation.
+ * perform a non-hinted invalidation. flags may be TLBF_NONE (0) or
+ * any combination of TLBF_NOWALKCACHE (elide eviction of walk
+ * cache entries), TLBF_NONOTIFY (don't call mmu notifiers),
+ * TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
+ * (only perform the invalidation for the local cpu).
*
+ * __flush_tlb_page(vma, addr, flags)
+ * Invalidate a single user mapping for address 'addr' in the
+ * address space corresponding to 'vma->mm'. Note that this
+ * operation only invalidates a single level 3 page-table entry
+ * and therefore does not affect any walk-caches. flags may contain
+ * any combination of TLBF_NONOTIFY (don't call mmu notifiers),
+ * TLBF_NOSYNC (don't issue trailing dsb) and TLBF_NOBROADCAST
+ * (only perform the invalidation for the local cpu).
*
* Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
* on top of these routines, since that is our interface to the mmu_gather
@@ -266,7 +322,7 @@ static inline void flush_tlb_all(void)
{
dsb(ishst);
__tlbi(vmalle1is);
- dsb(ish);
+ __tlbi_sync_s1ish();
isb();
}
@@ -278,67 +334,15 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
asid = __TLBI_VADDR(0, ASID(mm));
__tlbi(aside1is, asid);
__tlbi_user(aside1is, asid);
- dsb(ish);
+ __tlbi_sync_s1ish();
mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
-static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
- unsigned long uaddr)
-{
- unsigned long addr;
-
- dsb(ishst);
- addr = __TLBI_VADDR(uaddr, ASID(mm));
- __tlbi(vale1is, addr);
- __tlbi_user(vale1is, addr);
- mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
- (uaddr & PAGE_MASK) + PAGE_SIZE);
-}
-
-static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
- unsigned long uaddr)
-{
- return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long uaddr)
-{
- flush_tlb_page_nosync(vma, uaddr);
- dsb(ish);
-}
-
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
- /*
- * TLB flush deferral is not required on systems which are affected by
- * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
- * will have two consecutive TLBI instructions with a dsb(ish) in between
- * defeating the purpose (i.e save overall 'dsb ish' cost).
- */
- if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
- return false;
-
return true;
}
-static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
- struct mm_struct *mm,
- unsigned long uaddr)
-{
- __flush_tlb_page_nosync(mm, uaddr);
-}
-
-/*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
- * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
- * flush_tlb_batched_pending().
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
- dsb(ish);
-}
-
/*
* To support TLB batched flush for multiple pages unmapping, we only send
* the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
@@ -351,7 +355,7 @@ static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
*/
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
- dsb(ish);
+ __tlbi_sync_s1ish();
}
/*
@@ -363,14 +367,13 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
/*
* __flush_tlb_range_op - Perform TLBI operation upon a range
*
- * @op: TLBI instruction that operates on a range (has 'r' prefix)
+ * @lop: TLBI level operation to perform
+ * @rop: TLBI range operation to perform
* @start: The start address of the range
* @pages: Range as the number of pages from 'start'
* @stride: Flush granularity
* @asid: The ASID of the task (0 for IPA instructions)
- * @tlb_level: Translation Table level hint, if known
- * @tlbi_user: If 'true', call an additional __tlbi_user()
- * (typically for user ASIDs). 'flase' for IPA instructions
+ * @level: Translation Table level hint, if known
* @lpa2: If 'true', the lpa2 scheme is used as set out below
*
* When the CPU does not support TLB range operations, flush the TLB
@@ -393,98 +396,181 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
* operations can only span an even number of pages. We save this for last to
* ensure 64KB start alignment is maintained for the LPA2 case.
*/
-#define __flush_tlb_range_op(op, start, pages, stride, \
- asid, tlb_level, tlbi_user, lpa2) \
-do { \
- int num = 0; \
- int scale = 3; \
- int shift = lpa2 ? 16 : PAGE_SHIFT; \
- unsigned long addr; \
- \
- while (pages > 0) { \
- if (!system_supports_tlb_range() || \
- pages == 1 || \
- (lpa2 && start != ALIGN(start, SZ_64K))) { \
- addr = __TLBI_VADDR(start, asid); \
- __tlbi_level(op, addr, tlb_level); \
- if (tlbi_user) \
- __tlbi_user_level(op, addr, tlb_level); \
- start += stride; \
- pages -= stride >> PAGE_SHIFT; \
- continue; \
- } \
- \
- num = __TLBI_RANGE_NUM(pages, scale); \
- if (num >= 0) { \
- addr = __TLBI_VADDR_RANGE(start >> shift, asid, \
- scale, num, tlb_level); \
- __tlbi(r##op, addr); \
- if (tlbi_user) \
- __tlbi_user(r##op, addr); \
- start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
- pages -= __TLBI_RANGE_PAGES(num, scale); \
- } \
- scale--; \
- } \
-} while (0)
+static __always_inline void rvae1is(u64 arg)
+{
+ __tlbi(rvae1is, arg);
+ __tlbi_user(rvae1is, arg);
+}
+
+static __always_inline void rvale1(u64 arg)
+{
+ __tlbi(rvale1, arg);
+ __tlbi_user(rvale1, arg);
+}
+
+static __always_inline void rvale1is(u64 arg)
+{
+ __tlbi(rvale1is, arg);
+ __tlbi_user(rvale1is, arg);
+}
+
+static __always_inline void rvaale1is(u64 arg)
+{
+ __tlbi(rvaale1is, arg);
+}
+
+static __always_inline void ripas2e1is(u64 arg)
+{
+ __tlbi(ripas2e1is, arg);
+}
+
+static __always_inline void __tlbi_range(tlbi_op op, u64 addr,
+ u16 asid, int scale, int num,
+ u32 level, bool lpa2)
+{
+ u64 arg = 0;
+
+ arg |= FIELD_PREP(TLBIR_BADDR_MASK, addr >> (lpa2 ? 16 : PAGE_SHIFT));
+ arg |= FIELD_PREP(TLBIR_TTL_MASK, level > 3 ? 0 : level);
+ arg |= FIELD_PREP(TLBIR_NUM_MASK, num);
+ arg |= FIELD_PREP(TLBIR_SCALE_MASK, scale);
+ arg |= FIELD_PREP(TLBIR_TG_MASK, get_trans_granule());
+ arg |= FIELD_PREP(TLBIR_ASID_MASK, asid);
+
+ op(arg);
+}
+
+static __always_inline void __flush_tlb_range_op(tlbi_op lop, tlbi_op rop,
+ u64 start, size_t pages,
+ u64 stride, u16 asid,
+ u32 level, bool lpa2)
+{
+ u64 addr = start, end = start + pages * PAGE_SIZE;
+ int scale = 3;
+
+ while (addr != end) {
+ int num;
+
+ pages = (end - addr) >> PAGE_SHIFT;
+
+ if (!system_supports_tlb_range() || pages == 1)
+ goto invalidate_one;
+
+ if (lpa2 && !IS_ALIGNED(addr, SZ_64K))
+ goto invalidate_one;
+
+ num = __TLBI_RANGE_NUM(pages, scale);
+ if (num >= 0) {
+ __tlbi_range(rop, addr, asid, scale, num, level, lpa2);
+ addr += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
+ }
+
+ scale--;
+ continue;
+invalidate_one:
+ __tlbi_level_asid(lop, addr, level, asid);
+ addr += stride;
+ }
+}
+
+#define __flush_s1_tlb_range_op(op, start, pages, stride, asid, tlb_level) \
+ __flush_tlb_range_op(op, r##op, start, pages, stride, asid, tlb_level, lpa2_is_enabled())
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
- __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
+ __flush_tlb_range_op(op, r##op, start, pages, stride, 0, tlb_level, kvm_lpa2_is_enabled())
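
For readers following the loop in __flush_tlb_range_op(): each range operation at scale s covers (num + 1) << (5*s + 1) pages, and scales are tried from largest to smallest. A worked example of the arithmetic, assuming 4K pages:

/*
 * Worked example: invalidating 512 pages (2MB) with 4K pages.
 *
 *   scale 3: __TLBI_RANGE_NUM(512, 3) = (512 >> 16) - 1 = -1   -> skipped
 *   scale 2: __TLBI_RANGE_NUM(512, 2) = (512 >> 11) - 1 = -1   -> skipped
 *   scale 1: __TLBI_RANGE_NUM(512, 1) = (512 >>  6) - 1 =  7
 *            __TLBI_RANGE_PAGES(7, 1) = 8 << 6 = 512 pages     -> one RVAE1IS
 *
 * so the whole 2MB block is covered by a single range invalidation.
 */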
-static inline bool __flush_tlb_range_limit_excess(unsigned long start,
- unsigned long end, unsigned long pages, unsigned long stride)
+static inline bool __flush_tlb_range_limit_excess(unsigned long pages,
+ unsigned long stride)
{
/*
- * When the system does not support TLB range based flush
- * operation, (MAX_DVM_OPS - 1) pages can be handled. But
- * with TLB range based operation, MAX_TLBI_RANGE_PAGES
- * pages can be handled.
+ * Assume that the worst case number of DVM ops required to flush a
+ * given range on a system that supports tlb-range is 20 (4 scales, 1
+ * final page, 15 for alignment on LPA2 systems), which is much smaller
+ * than MAX_DVM_OPS.
*/
- if ((!system_supports_tlb_range() &&
- (end - start) >= (MAX_DVM_OPS * stride)) ||
- pages > MAX_TLBI_RANGE_PAGES)
- return true;
+ if (system_supports_tlb_range())
+ return pages > MAX_TLBI_RANGE_PAGES;
- return false;
+ return pages >= (MAX_DVM_OPS * stride) >> PAGE_SHIFT;
}
-static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long stride, bool last_level,
- int tlb_level)
+typedef unsigned __bitwise tlbf_t;
+
+/* No special behaviour. */
+#define TLBF_NONE ((__force tlbf_t)0)
+
+/* Invalidate tlb entries only, leaving the page table walk cache intact. */
+#define TLBF_NOWALKCACHE ((__force tlbf_t)BIT(0))
+
+/* Skip the trailing dsb after issuing tlbi. */
+#define TLBF_NOSYNC ((__force tlbf_t)BIT(1))
+
+/* Suppress tlb notifier callbacks for this flush operation. */
+#define TLBF_NONOTIFY ((__force tlbf_t)BIT(2))
+
+/* Perform the tlbi locally without broadcasting to other CPUs. */
+#define TLBF_NOBROADCAST ((__force tlbf_t)BIT(3))
+
+static __always_inline void __do_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long stride, int tlb_level,
+ tlbf_t flags)
{
+ struct mm_struct *mm = vma->vm_mm;
unsigned long asid, pages;
- start = round_down(start, stride);
- end = round_up(end, stride);
pages = (end - start) >> PAGE_SHIFT;
- if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
- flush_tlb_mm(vma->vm_mm);
+ if (__flush_tlb_range_limit_excess(pages, stride)) {
+ flush_tlb_mm(mm);
return;
}
- dsb(ishst);
- asid = ASID(vma->vm_mm);
-
- if (last_level)
- __flush_tlb_range_op(vale1is, start, pages, stride, asid,
- tlb_level, true, lpa2_is_enabled());
+ if (!(flags & TLBF_NOBROADCAST))
+ dsb(ishst);
else
- __flush_tlb_range_op(vae1is, start, pages, stride, asid,
- tlb_level, true, lpa2_is_enabled());
+ dsb(nshst);
+
+ asid = ASID(mm);
+
+ switch (flags & (TLBF_NOWALKCACHE | TLBF_NOBROADCAST)) {
+ case TLBF_NONE:
+ __flush_s1_tlb_range_op(vae1is, start, pages, stride,
+ asid, tlb_level);
+ break;
+ case TLBF_NOWALKCACHE:
+ __flush_s1_tlb_range_op(vale1is, start, pages, stride,
+ asid, tlb_level);
+ break;
+ case TLBF_NOBROADCAST:
+ /* Combination unused */
+ BUG();
+ break;
+ case TLBF_NOWALKCACHE | TLBF_NOBROADCAST:
+ __flush_s1_tlb_range_op(vale1, start, pages, stride,
+ asid, tlb_level);
+ break;
+ }
- mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
+ if (!(flags & TLBF_NONOTIFY))
+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
+
+ if (!(flags & TLBF_NOSYNC)) {
+ if (!(flags & TLBF_NOBROADCAST))
+ __tlbi_sync_s1ish();
+ else
+ dsb(nsh);
+ }
}
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- unsigned long stride, bool last_level,
- int tlb_level)
+ unsigned long stride, int tlb_level,
+ tlbf_t flags)
{
- __flush_tlb_range_nosync(vma, start, end, stride,
- last_level, tlb_level);
- dsb(ish);
+ start = round_down(start, stride);
+ end = round_up(end, stride);
+ __do_flush_tlb_range(vma, start, end, stride, tlb_level, flags);
}
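
A usage sketch of the flags interface introduced above; the caller below is hypothetical and not part of the patch. It requests a leaf-only, local (non-broadcast) invalidation and defers the completion barrier, which maps onto the vale1 case of __do_flush_tlb_range():

/*
 * Hypothetical caller: local, leaf-only invalidation with the final
 * barrier supplied by the caller because TLBF_NOSYNC was passed.
 */
static inline void example_local_leaf_flush(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long end)
{
	__flush_tlb_range(vma, start, end, PAGE_SIZE, 3,
			  TLBF_NOWALKCACHE | TLBF_NOBROADCAST | TLBF_NOSYNC);

	/* ... further local invalidations could be batched here ... */

	dsb(nsh);	/* caller-provided completion for the local-only TLBIs */
}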
static inline void flush_tlb_range(struct vm_area_struct *vma,
@@ -496,7 +582,23 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
* Set the tlb_level to TLBI_TTL_UNKNOWN because we can not get enough
* information here.
*/
- __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, TLBI_TTL_UNKNOWN, TLBF_NONE);
+}
+
+static inline void __flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long uaddr, tlbf_t flags)
+{
+ unsigned long start = round_down(uaddr, PAGE_SIZE);
+ unsigned long end = start + PAGE_SIZE;
+
+ __do_flush_tlb_range(vma, start, end, PAGE_SIZE, 3,
+ TLBF_NOWALKCACHE | flags);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long uaddr)
+{
+ __flush_tlb_page(vma, uaddr, TLBF_NONE);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -508,15 +610,15 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
end = round_up(end, stride);
pages = (end - start) >> PAGE_SHIFT;
- if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
+ if (__flush_tlb_range_limit_excess(pages, stride)) {
flush_tlb_all();
return;
}
dsb(ishst);
- __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
- TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
- dsb(ish);
+ __flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
+ TLBI_TTL_UNKNOWN);
+ __tlbi_sync_s1ish();
isb();
}
@@ -530,9 +632,47 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
dsb(ishst);
__tlbi(vaae1is, addr);
- dsb(ish);
+ __tlbi_sync_s1ish();
isb();
}
+
+static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ struct vm_area_struct vma = { .vm_mm = mm, .vm_flags = 0 };
+
+ __flush_tlb_range(&vma, start, end, PAGE_SIZE, 3,
+ TLBF_NOWALKCACHE | TLBF_NOSYNC);
+}
+
+static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
+{
+ ptdesc_t diff = oldval ^ newval;
+
+ /* invalid to valid transition requires no flush */
+ if (!(oldval & PTE_VALID))
+ return false;
+
+ /* Transition in the SW bits requires no flush */
+ diff &= ~PTE_SWBITS_MASK;
+
+ return diff;
+}
+
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
+}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
+
+#undef __tlbi_user
+#undef __TLBI_VADDR
#endif
#endif
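
On the pte_needs_flush()/huge_pmd_needs_flush() additions at the end of tlbflush.h: they let generic code (change_protection and friends) skip a TLB flush when nothing architecturally visible changed. Illustrative cases of the __pte_flags_need_flush() policy, stated conceptually rather than as real PTE values:

/*
 * Conceptual decision table for __pte_flags_need_flush():
 *
 *   invalid -> valid transition             : no flush (nothing cached)
 *   valid, only PTE_SWBITS_MASK bits change : no flush (HW ignores SW bits)
 *   valid, permissions/output address change: flush required
 */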
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 341174bf9106..b9eaf4ad7085 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -36,6 +36,9 @@ void update_freq_counters_refs(void);
#define arch_scale_hw_pressure topology_get_hw_pressure
#define arch_update_hw_pressure topology_update_hw_pressure
+#undef arch_cpu_is_threaded
+#define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK)
+
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index d780d1bd2eac..e92e4a0e48fc 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -29,7 +29,14 @@ void arm64_force_sig_fault_pkey(unsigned long far, const char *str, int pkey);
void arm64_force_sig_mceerr(int code, unsigned long far, short lsb, const char *str);
void arm64_force_sig_ptrace_errno_trap(int errno, unsigned long far, const char *str);
+int bug_brk_handler(struct pt_regs *regs, unsigned long esr);
+int cfi_brk_handler(struct pt_regs *regs, unsigned long esr);
+int reserved_fault_brk_handler(struct pt_regs *regs, unsigned long esr);
+int kasan_brk_handler(struct pt_regs *regs, unsigned long esr);
+int ubsan_brk_handler(struct pt_regs *regs, unsigned long esr);
+
int early_brk64(unsigned long addr, unsigned long esr, struct pt_regs *regs);
+void dump_kernel_instr(unsigned long kaddr);
/*
* Move regs->pc to next instruction and do necessary setup before it
@@ -109,10 +116,9 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr);
int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr);
int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr);
- unsigned long dst, src, size;
+ unsigned long dst, size;
dst = regs->regs[dstreg];
- src = regs->regs[srcreg];
size = regs->regs[sizereg];
/*
@@ -129,6 +135,7 @@ static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned lon
}
} else {
/* CPY* instruction */
+ unsigned long src = regs->regs[srcreg];
if (!(option_a ^ wrong_option)) {
/* Format is from Option B */
if (regs->pstate & PSR_N_BIT) {
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5b91803201ef..b0c83a08dda9 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,7 +62,7 @@ static inline void __uaccess_ttbr0_disable(void)
local_irq_save(flags);
ttbr = read_sysreg(ttbr1_el1);
- ttbr &= ~TTBR_ASID_MASK;
+ ttbr &= ~TTBRx_EL1_ASID_MASK;
/* reserved_pg_dir placed before swapper_pg_dir */
write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
/* Set reserved ASID */
@@ -85,8 +85,8 @@ static inline void __uaccess_ttbr0_enable(void)
/* Restore active ASID */
ttbr1 = read_sysreg(ttbr1_el1);
- ttbr1 &= ~TTBR_ASID_MASK; /* safety measure */
- ttbr1 |= ttbr0 & TTBR_ASID_MASK;
+ ttbr1 &= ~TTBRx_EL1_ASID_MASK; /* safety measure */
+ ttbr1 |= ttbr0 & TTBRx_EL1_ASID_MASK;
write_sysreg(ttbr1, ttbr1_el1);
/* Restore user page table */
@@ -124,14 +124,12 @@ static inline bool uaccess_ttbr0_enable(void)
static inline void __uaccess_disable_hw_pan(void)
{
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN));
}
static inline void __uaccess_enable_hw_pan(void)
{
- asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
- CONFIG_ARM64_PAN));
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN));
}
static inline void uaccess_disable_privileged(void)
@@ -422,9 +420,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
}
#define user_access_begin(a,b) user_access_begin(a,b)
#define user_access_end() uaccess_ttbr0_disable()
-#define unsafe_put_user(x, ptr, label) \
+#define arch_unsafe_put_user(x, ptr, label) \
__raw_put_mem("sttr", x, uaccess_mask_ptr(ptr), label, U)
-#define unsafe_get_user(x, ptr, label) \
+#define arch_unsafe_get_user(x, ptr, label) \
__raw_get_mem("ldtr", x, uaccess_mask_ptr(ptr), label, U)
/*
@@ -480,7 +478,7 @@ extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
-static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+static inline size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
{
kasan_check_write(dst, size);
return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
@@ -502,44 +500,4 @@ static inline size_t probe_subpage_writeable(const char __user *uaddr,
#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
-#ifdef CONFIG_ARM64_GCS
-
-static inline int gcssttr(unsigned long __user *addr, unsigned long val)
-{
- register unsigned long __user *_addr __asm__ ("x0") = addr;
- register unsigned long _val __asm__ ("x1") = val;
- int err = 0;
-
- /* GCSSTTR x1, x0 */
- asm volatile(
- "1: .inst 0xd91f1c01\n"
- "2: \n"
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
- : "+r" (err)
- : "rZ" (_val), "r" (_addr)
- : "memory");
-
- return err;
-}
-
-static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
- int *err)
-{
- int ret;
-
- if (!access_ok((char __user *)addr, sizeof(u64))) {
- *err = -EFAULT;
- return;
- }
-
- uaccess_ttbr0_enable();
- ret = gcssttr(addr, val);
- if (ret != 0)
- *err = ret;
- uaccess_ttbr0_disable();
-}
-
-
-#endif /* CONFIG_ARM64_GCS */
-
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/asm/uprobes.h b/arch/arm64/include/asm/uprobes.h
index 014b02897f8e..89bfb0213a50 100644
--- a/arch/arm64/include/asm/uprobes.h
+++ b/arch/arm64/include/asm/uprobes.h
@@ -28,4 +28,15 @@ struct arch_uprobe {
bool simulate;
};
+int uprobe_brk_handler(struct pt_regs *regs, unsigned long esr);
+#ifdef CONFIG_UPROBES
+int uprobe_single_step_handler(struct pt_regs *regs, unsigned long esr);
+#else
+static inline int uprobe_single_step_handler(struct pt_regs *regs,
+ unsigned long esr)
+{
+ return DBG_HOOK_ERROR;
+}
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 3e3c3fdb1842..232b46969088 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -5,9 +5,9 @@
#ifndef __ASM_VDSO_H
#define __ASM_VDSO_H
-#define __VVAR_PAGES 2
+#define __VDSO_PAGES 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <generated/vdso-offsets.h>
@@ -19,6 +19,6 @@
extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_H */
diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h
index 3ac35f4a667c..d7ebe7ceefa0 100644
--- a/arch/arm64/include/asm/vdso/compat_barrier.h
+++ b/arch/arm64/include/asm/vdso/compat_barrier.h
@@ -5,13 +5,12 @@
#ifndef __COMPAT_BARRIER_H
#define __COMPAT_BARRIER_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
- * Warning: This code is meant to be used with
- * ENABLE_COMPAT_VDSO only.
+ * Warning: This code is meant to be used from the compat vDSO only.
*/
-#ifndef ENABLE_COMPAT_VDSO
-#error This header is meant to be used with ENABLE_COMPAT_VDSO only
+#ifdef __aarch64__
+#error This header is meant to be used from the compat vDSO only
#endif
#ifdef dmb
@@ -32,6 +31,6 @@
#define smp_rmb() aarch32_smp_rmb()
#define smp_wmb() aarch32_smp_wmb()
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __COMPAT_BARRIER_H */
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index 778c1202bbbf..a03e34b572f1 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -2,10 +2,13 @@
/*
* Copyright (C) 2018 ARM Limited
*/
-#ifndef __ASM_VDSO_GETTIMEOFDAY_H
-#define __ASM_VDSO_GETTIMEOFDAY_H
+#ifndef __ASM_VDSO_COMPAT_GETTIMEOFDAY_H
+#define __ASM_VDSO_COMPAT_GETTIMEOFDAY_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
+
+#include <vdso/clocksource.h>
+#include <vdso/time32.h>
#include <asm/barrier.h>
#include <asm/unistd_compat_32.h>
@@ -104,7 +107,7 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
u64 res;
@@ -131,48 +134,36 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
return res;
}
-static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- const struct vdso_data *ret;
+ const struct vdso_time_data *ret;
/*
- * This simply puts &_vdso_data into ret. The reason why we don't use
- * `ret = _vdso_data` is that the compiler tends to optimise this in a
- * very suboptimal way: instead of keeping &_vdso_data in a register,
- * it goes through a relocation almost every time _vdso_data must be
+ * This simply puts &_vdso_time_data into ret. The reason why we don't use
+ * `ret = _vdso_time_data` is that the compiler tends to optimise this in a
+ * very suboptimal way: instead of keeping &_vdso_time_data in a register,
+ * it goes through a relocation almost every time _vdso_time_data must be
* accessed (even in subfunctions). This is both time and space
* consuming: each relocation uses a word in the code section, and it
* has to be loaded at runtime.
*
* This trick hides the assignment from the compiler. Since it cannot
* track where the pointer comes from, it will only use one relocation
- * where __arch_get_vdso_data() is called, and then keep the result in
- * a register.
+	 * where __arch_get_vdso_u_time_data() is called, and then keep the
+ * result in a register.
*/
- asm volatile("mov %0, %1" : "=r"(ret) : "r"(_vdso_data));
-
- return ret;
-}
-
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- const struct vdso_data *ret;
-
- /* See __arch_get_vdso_data(). */
- asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));
+ asm volatile("mov %0, %1" : "=r"(ret) : "r"(&vdso_u_time_data));
return ret;
}
-#endif
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+static inline bool vdso_clocksource_ok(const struct vdso_clock *vc)
{
- return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+ return vc->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
}
#define vdso_clocksource_ok vdso_clocksource_ok
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
-#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
+#endif /* __ASM_VDSO_COMPAT_GETTIMEOFDAY_H */
diff --git a/arch/arm64/include/asm/vdso/getrandom.h b/arch/arm64/include/asm/vdso/getrandom.h
index 342f807e2044..da1d58bbfabe 100644
--- a/arch/arm64/include/asm/vdso/getrandom.h
+++ b/arch/arm64/include/asm/vdso/getrandom.h
@@ -3,7 +3,7 @@
#ifndef __ASM_VDSO_GETRANDOM_H
#define __ASM_VDSO_GETRANDOM_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/unistd.h>
#include <asm/vdso/vsyscall.h>
@@ -33,18 +33,6 @@ static __always_inline ssize_t getrandom_syscall(void *_buffer, size_t _len, uns
return ret;
}
-static __always_inline const struct vdso_rng_data *__arch_get_vdso_rng_data(void)
-{
- /*
- * The RNG data is in the real VVAR data page, but if a task belongs to a time namespace
- * then VVAR_DATA_PAGE_OFFSET points to the namespace-specific VVAR page and VVAR_TIMENS_
- * PAGE_OFFSET points to the real VVAR page.
- */
- if (IS_ENABLED(CONFIG_TIME_NS) && _vdso_data->clock_mode == VDSO_CLOCKMODE_TIMENS)
- return (void *)&_vdso_rng_data + VVAR_TIMENS_PAGE_OFFSET * (1UL << CONFIG_PAGE_SHIFT);
- return &_vdso_rng_data;
-}
-
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_GETRANDOM_H */
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 764d13e2916c..96d2eccd4995 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -5,9 +5,14 @@
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
-#ifndef __ASSEMBLY__
+#ifdef __aarch64__
+
+#ifndef __ASSEMBLER__
+
+#include <vdso/clocksource.h>
#include <asm/alternative.h>
+#include <asm/arch_timer.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/sysreg.h>
@@ -67,10 +72,8 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
- const struct vdso_data *vd)
+ const struct vdso_time_data *vd)
{
- u64 res;
-
/*
* Core checks for mode already, so this raced against a concurrent
* update. Return something. Core will do another round and then
@@ -79,40 +82,28 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
if (clock_mode == VDSO_CLOCKMODE_NONE)
return 0;
- /*
- * If FEAT_ECV is available, use the self-synchronizing counter.
- * Otherwise the isb is required to prevent that the counter value
- * is speculated.
- */
- asm volatile(
- ALTERNATIVE("isb\n"
- "mrs %0, cntvct_el0",
- "nop\n"
- __mrs_s("%0", SYS_CNTVCTSS_EL0),
- ARM64_HAS_ECV)
- : "=r" (res)
- :
- : "memory");
-
- arch_counter_enforce_ordering(res);
-
- return res;
+ return __arch_counter_get_cntvct();
}
-static __always_inline
-const struct vdso_data *__arch_get_vdso_data(void)
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
{
- return _vdso_data;
-}
+ const struct vdso_time_data *ret = &vdso_u_time_data;
-#ifdef CONFIG_TIME_NS
-static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
-{
- return _timens_data;
+ /* Work around invalid absolute relocations */
+ OPTIMIZER_HIDE_VAR(ret);
+
+ return ret;
}
-#endif
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
+
+#endif /* !__ASSEMBLER__ */
+
+#else /* !__aarch64__ */
+
+#include "compat_gettimeofday.h"
-#endif /* !__ASSEMBLY__ */
+#endif /* __aarch64__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
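
Both the compat asm-mov variant and the GCC/64K OPTIMIZER_HIDE_VAR variant above implement the same idea: hide the address of vdso_u_time_data from the compiler so it is materialised through a single relocation and then kept in a register. A minimal sketch of the generic pattern, with an illustrative helper name:

/*
 * Sketch of the relocation-hiding trick (helper name made up for this
 * example): take the address once, then launder it through an empty asm
 * so the compiler cannot re-derive it via further relocations.
 */
static __always_inline const struct vdso_time_data *hide_reloc_example(void)
{
	const struct vdso_time_data *p = &vdso_u_time_data;

	OPTIMIZER_HIDE_VAR(p);	/* roughly: asm volatile("" : "+r"(p)) */
	return p;
}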
diff --git a/arch/arm64/include/asm/vdso/processor.h b/arch/arm64/include/asm/vdso/processor.h
index ff830b766ad2..7abb0cc81cd6 100644
--- a/arch/arm64/include/asm/vdso/processor.h
+++ b/arch/arm64/include/asm/vdso/processor.h
@@ -5,13 +5,13 @@
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
static inline void cpu_relax(void)
{
asm volatile("yield" ::: "memory");
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/vdso/vsyscall.h b/arch/arm64/include/asm/vdso/vsyscall.h
index eea51946d45a..3f3c8eb74e2e 100644
--- a/arch/arm64/include/asm/vdso/vsyscall.h
+++ b/arch/arm64/include/asm/vdso/vsyscall.h
@@ -2,50 +2,26 @@
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
-#define __VDSO_RND_DATA_OFFSET 480
-
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <vdso/datapage.h>
-enum vvar_pages {
- VVAR_DATA_PAGE_OFFSET,
- VVAR_TIMENS_PAGE_OFFSET,
- VVAR_NR_PAGES,
-};
-
#define VDSO_PRECISION_MASK ~(0xFF00ULL<<48)
-extern struct vdso_data *vdso_data;
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
static __always_inline
-struct vdso_data *__arm64_get_k_vdso_data(void)
-{
- return vdso_data;
-}
-#define __arch_get_k_vdso_data __arm64_get_k_vdso_data
-
-static __always_inline
-struct vdso_rng_data *__arm64_get_k_vdso_rnd_data(void)
-{
- return (void *)vdso_data + __VDSO_RND_DATA_OFFSET;
-}
-#define __arch_get_k_vdso_rng_data __arm64_get_k_vdso_rnd_data
-
-static __always_inline
-void __arm64_update_vsyscall(struct vdso_data *vdata)
+void __arch_update_vdso_clock(struct vdso_clock *vc)
{
- vdata[CS_HRES_COARSE].mask = VDSO_PRECISION_MASK;
- vdata[CS_RAW].mask = VDSO_PRECISION_MASK;
+ vc->mask = VDSO_PRECISION_MASK;
}
-#define __arch_update_vsyscall __arm64_update_vsyscall
+#define __arch_update_vdso_clock __arch_update_vdso_clock
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index ebf4a9f943ed..b546703c3ab9 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -40,8 +40,13 @@
*/
#define HVC_FINALISE_EL2 3
+/*
+ * HVC_GET_ICH_VTR_EL2 - Retrieve the ICH_VTR_EL2 value
+ */
+#define HVC_GET_ICH_VTR_EL2 4
+
/* Max number of HYP stub hypercalls */
-#define HVC_STUB_HCALL_NR 4
+#define HVC_STUB_HCALL_NR 5
/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR 0xbadca11
@@ -56,7 +61,7 @@
*/
#define BOOT_CPU_FLAG_E2H BIT_ULL(32)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/ptrace.h>
#include <asm/sections.h>
@@ -67,7 +72,8 @@
* __boot_cpu_mode records what mode CPUs were booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
* In this case, both 32bit halves of __boot_cpu_mode will contain the
- * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2).
+ * same value (either BOOT_CPU_MODE_EL1 if booted in EL1, BOOT_CPU_MODE_EL2 if
+ * booted in EL2).
*
* Should the bootloader fail to do this, the two values will be different.
* This allows the kernel to flag an error when the secondaries have come up.
@@ -88,6 +94,15 @@ static inline bool is_pkvm_initialized(void)
static_branch_likely(&kvm_protected_mode_initialized);
}
+#ifdef CONFIG_KVM
+bool pkvm_force_reclaim_guest_page(phys_addr_t phys);
+#else
+static inline bool pkvm_force_reclaim_guest_page(phys_addr_t phys)
+{
+ return false;
+}
+#endif
+
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
@@ -160,6 +175,6 @@ static inline bool is_hyp_nvhe(void)
return is_hyp_mode_available() && !is_kernel_in_hyp_mode();
}
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* ! __ASM__VIRT_H */
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 38fafffe699f..4ec1acd3c1b3 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -9,18 +9,58 @@
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
- /*
- * SW table walks can't handle removal of intermediate entries.
- */
- return pud_sect_supported() &&
- !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
+ return pud_sect_supported();
}
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
- /* See arch_vmap_pud_supported() */
- return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
+ return true;
+}
+
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
+ unsigned long end, u64 pfn,
+ unsigned int max_page_shift)
+{
+ /*
+ * If the block is at least CONT_PTE_SIZE in size, and is naturally
+ * aligned in both virtual and physical space, then we can pte-map the
+ * block using the PTE_CONT bit for more efficient use of the TLB.
+ */
+ if (max_page_shift < CONT_PTE_SHIFT)
+ return PAGE_SIZE;
+
+ if (end - addr < CONT_PTE_SIZE)
+ return PAGE_SIZE;
+
+ if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
+ return PAGE_SIZE;
+
+ return CONT_PTE_SIZE;
+}
+
+#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size
+static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * The caller handles alignment so it's sufficient just to check
+ * PTE_CONT.
+ */
+ return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
+}
+
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= CONT_PTE_SIZE)
+ return CONT_PTE_SHIFT;
+
+ return PAGE_SHIFT;
}
#endif
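
A worked example of the contiguous-PTE decision added to vmalloc.h above, assuming 4K base pages so that CONT_PTE_SIZE is 64K (16 PTEs):

/*
 * Assumed 4K base pages, CONT_PTE_SIZE = 64K:
 *
 *   128K mapping, virtual address and PFN both 64K-aligned
 *     -> arch_vmap_pte_range_map_size() returns 64K twice; the range is
 *        installed as two blocks of 16 PTEs each with PTE_CONT set.
 *
 *   32K mapping, or a 64K-misaligned start or PFN
 *     -> returns PAGE_SIZE and the range is mapped one PTE at a time.
 */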
diff --git a/arch/arm64/include/asm/vmap_stack.h b/arch/arm64/include/asm/vmap_stack.h
index 20873099c035..75daee1a07e9 100644
--- a/arch/arm64/include/asm/vmap_stack.h
+++ b/arch/arm64/include/asm/vmap_stack.h
@@ -3,9 +3,7 @@
#ifndef __ASM_VMAP_STACK_H
#define __ASM_VMAP_STACK_H
-#include <linux/bug.h>
#include <linux/gfp.h>
-#include <linux/kconfig.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/memory.h>
@@ -19,8 +17,6 @@ static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
{
void *p;
- BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
-
p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
__builtin_return_address(0));
return kasan_reset_tag(p);
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index 4f9bbd4d6c26..14366d35ce82 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -35,6 +35,8 @@
#define VNCR_CNTP_CTL_EL0 0x180
#define VNCR_SCXTNUM_EL1 0x188
#define VNCR_TFSR_EL1 0x190
+#define VNCR_HDFGRTR2_EL2 0x1A0
+#define VNCR_HDFGWTR2_EL2 0x1B0
#define VNCR_HFGRTR_EL2 0x1B8
#define VNCR_HFGWTR_EL2 0x1C0
#define VNCR_HFGITR_EL2 0x1C8
@@ -49,9 +51,13 @@
#define VNCR_SP_EL1 0x240
#define VNCR_VBAR_EL1 0x250
#define VNCR_TCR2_EL1 0x270
+#define VNCR_SCTLR2_EL1 0x278
#define VNCR_PIRE0_EL1 0x290
#define VNCR_PIR_EL1 0x2A0
#define VNCR_POR_EL1 0x2A8
+#define VNCR_HFGRTR2_EL2 0x2C0
+#define VNCR_HFGWTR2_EL2 0x2C8
+#define VNCR_HFGITR2_EL2 0x310
#define VNCR_ICH_LR0_EL2 0x400
#define VNCR_ICH_LR1_EL2 0x408
#define VNCR_ICH_LR2_EL2 0x410
@@ -79,6 +85,7 @@
#define VNCR_ICH_HCR_EL2 0x4C0
#define VNCR_ICH_VMCR_EL2 0x4C8
#define VNCR_VDISR_EL2 0x500
+#define VNCR_VSESR_EL2 0x508
#define VNCR_PMBLIMITR_EL1 0x800
#define VNCR_PMBPTR_EL1 0x810
#define VNCR_PMBSR_EL1 0x820
@@ -87,6 +94,8 @@
#define VNCR_PMSICR_EL1 0x838
#define VNCR_PMSIRR_EL1 0x840
#define VNCR_PMSLATFR_EL1 0x848
+#define VNCR_PMSNEVFR_EL1 0x850
+#define VNCR_PMSDSFR_EL1 0x858
#define VNCR_TRFCR_EL1 0x880
#define VNCR_MPAM1_EL1 0x900
#define VNCR_MPAMHCR_EL2 0x930
@@ -99,5 +108,8 @@
#define VNCR_MPAMVPM5_EL2 0x968
#define VNCR_MPAMVPM6_EL2 0x970
#define VNCR_MPAMVPM7_EL2 0x978
+#define VNCR_ICH_HFGITR_EL2 0xB10
+#define VNCR_ICH_HFGRTR_EL2 0xB18
+#define VNCR_ICH_HFGWTR_EL2 0xB20
#endif /* __ARM64_VNCR_MAPPING_H__ */
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 2788e95d0ff0..2977b5fe068d 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -14,7 +14,7 @@ enum ipi_vector {
static inline int xen_irqs_disabled(struct pt_regs *regs)
{
- return !interrupts_enabled(regs);
+ return regs_irqs_disabled(regs);
}
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h
deleted file mode 100644
index befcd8a7abc9..000000000000
--- a/arch/arm64/include/asm/xor.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm64/include/asm/xor.h
- *
- * Authors: Jackie Liu <liuyun01@kylinos.cn>
- * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
- */
-
-#include <linux/hardirq.h>
-#include <asm-generic/xor.h>
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-
-#ifdef CONFIG_KERNEL_MODE_NEON
-
-extern struct xor_block_template const xor_block_inner_neon;
-
-static void
-xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2)
-{
- kernel_neon_begin();
- xor_block_inner_neon.do_2(bytes, p1, p2);
- kernel_neon_end();
-}
-
-static void
-xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3)
-{
- kernel_neon_begin();
- xor_block_inner_neon.do_3(bytes, p1, p2, p3);
- kernel_neon_end();
-}
-
-static void
-xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4)
-{
- kernel_neon_begin();
- xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
- kernel_neon_end();
-}
-
-static void
-xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
- const unsigned long * __restrict p2,
- const unsigned long * __restrict p3,
- const unsigned long * __restrict p4,
- const unsigned long * __restrict p5)
-{
- kernel_neon_begin();
- xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
- kernel_neon_end();
-}
-
-static struct xor_block_template xor_block_arm64 = {
- .name = "arm64_neon",
- .do_2 = xor_neon_2,
- .do_3 = xor_neon_3,
- .do_4 = xor_neon_4,
- .do_5 = xor_neon_5
-};
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES \
- do { \
- xor_speed(&xor_block_8regs); \
- xor_speed(&xor_block_32regs); \
- if (cpu_has_neon()) { \
- xor_speed(&xor_block_arm64);\
- } \
- } while (0)
-
-#endif /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h
index 485d60bee26c..d59730975f30 100644
--- a/arch/arm64/include/uapi/asm/bitsperlong.h
+++ b/arch/arm64/include/uapi/asm/bitsperlong.h
@@ -17,7 +17,12 @@
#ifndef __ASM_BITSPERLONG_H
#define __ASM_BITSPERLONG_H
+#if defined(__KERNEL__) && !defined(__aarch64__)
+/* Used by the compat vDSO */
+#define __BITS_PER_LONG 32
+#else
#define __BITS_PER_LONG 64
+#endif
#include <asm-generic/bitsperlong.h>
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 48d46b768eae..06f83ca8de56 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -56,6 +56,21 @@
#define HWCAP_PACA (1 << 30)
#define HWCAP_PACG (1UL << 31)
#define HWCAP_GCS (1UL << 32)
+#define HWCAP_CMPBR (1UL << 33)
+#define HWCAP_FPRCVT (1UL << 34)
+#define HWCAP_F8MM8 (1UL << 35)
+#define HWCAP_F8MM4 (1UL << 36)
+#define HWCAP_SVE_F16MM (1UL << 37)
+#define HWCAP_SVE_ELTPERM (1UL << 38)
+#define HWCAP_SVE_AES2 (1UL << 39)
+#define HWCAP_SVE_BFSCALE (1UL << 40)
+#define HWCAP_SVE2P2 (1UL << 41)
+#define HWCAP_SME2P2 (1UL << 42)
+#define HWCAP_SME_SBITPERM (1UL << 43)
+#define HWCAP_SME_AES (1UL << 44)
+#define HWCAP_SME_SFEXPA (1UL << 45)
+#define HWCAP_SME_STMOP (1UL << 46)
+#define HWCAP_SME_SMOP4 (1UL << 47)
/*
* HWCAP2 flags - for AT_HWCAP2
@@ -128,5 +143,9 @@
/*
* HWCAP3 flags - for AT_HWCAP3
*/
+#define HWCAP3_MTE_FAR (1UL << 0)
+#define HWCAP3_MTE_STORE_ONLY (1UL << 1)
+#define HWCAP3_LSFE (1UL << 2)
+#define HWCAP3_LS64 (1UL << 3)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 66736ff04011..1c13bfa2d38a 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -31,7 +31,7 @@
#define KVM_SPSR_FIQ 4
#define KVM_NR_SPSR 5
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/ptrace.h>
@@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
-#define KVM_REG_SIZE(id) \
- (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
-
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */
@@ -108,6 +105,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
+#define KVM_ARM_VCPU_HAS_EL2_E2H0 8 /* Limit NV support to E2H RES0 */
struct kvm_vcpu_init {
__u32 target;
@@ -374,6 +372,7 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 0-63 */
#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
enum {
@@ -384,6 +383,17 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2 KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+#ifdef __KERNEL__
+ KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
@@ -406,6 +416,7 @@ enum {
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
+#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
@@ -417,13 +428,15 @@ enum {
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
+#define KVM_DEV_ARM_VGIC_USERSPACE_PPIS 5
/* Device Control API on vcpu fd */
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
-#define KVM_ARM_VCPU_PMU_V3_IRQ 0
-#define KVM_ARM_VCPU_PMU_V3_INIT 1
-#define KVM_ARM_VCPU_PMU_V3_FILTER 2
-#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
+#define KVM_ARM_VCPU_PMU_V3_SET_NR_COUNTERS 4
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 0f39ba4f3efd..6fed93fb2536 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -80,7 +80,7 @@
#define PTRACE_PEEKMTETAGS 33
#define PTRACE_POKEMTETAGS 34
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* User structures for general purpose, floating point and debug registers.
@@ -332,6 +332,6 @@ struct user_gcs {
__u64 gcspr_el0;
};
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _UAPI__ASM_PTRACE_H */
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index d42f7a92238b..e29bf3e2d0cc 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -17,7 +17,7 @@
#ifndef _UAPI__ASM_SIGCONTEXT_H
#define _UAPI__ASM_SIGCONTEXT_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <linux/types.h>
@@ -192,7 +192,7 @@ struct gcs_context {
__u64 reserved;
};
-#endif /* !__ASSEMBLY__ */
+#endif /* !__ASSEMBLER__ */
#include <asm/sve_context.h>