Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/acpi.h              |  3
-rw-r--r--  arch/arm64/include/asm/efi.h               | 18
-rw-r--r--  arch/arm64/include/asm/el2_setup.h         | 44
-rw-r--r--  arch/arm64/include/asm/fpsimd.h            |  4
-rw-r--r--  arch/arm64/include/asm/ftrace.h            |  4
-rw-r--r--  arch/arm64/include/asm/hwcap.h             |  1
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h    | 27
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h           |  2
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h       | 21
-rw-r--r--  arch/arm64/include/asm/kvm_host.h          |  2
-rw-r--r--  arch/arm64/include/asm/kvm_pgtable.h       | 26
-rw-r--r--  arch/arm64/include/asm/mmu.h               |  1
-rw-r--r--  arch/arm64/include/asm/pgtable.h           |  8
-rw-r--r--  arch/arm64/include/asm/processor.h         |  8
-rw-r--r--  arch/arm64/include/asm/sdei.h              |  6
-rw-r--r--  arch/arm64/include/asm/syscall.h           |  3
-rw-r--r--  arch/arm64/include/asm/sysreg.h            |  6
-rw-r--r--  arch/arm64/include/asm/unistd.h            |  2
-rw-r--r--  arch/arm64/include/asm/unistd32.h          |  2
-rw-r--r--  arch/arm64/include/asm/virt.h              |  1
-rw-r--r--  arch/arm64/include/uapi/asm/bitsperlong.h  | 24
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h        |  1
22 files changed, 120 insertions(+), 94 deletions(-)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index bd68e1b7f29f..4d537d56eb84 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -42,6 +42,9 @@
 #define ACPI_MADT_GICC_SPE	(offsetof(struct acpi_madt_generic_interrupt, \
 	spe_interrupt) + sizeof(u16))
+#define ACPI_MADT_GICC_TRBE	(offsetof(struct acpi_madt_generic_interrupt, \
+	trbe_interrupt) + sizeof(u16))
+
 /* Basic configuration for ACPI */
 #ifdef	CONFIG_ACPI
 pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 4cf2cb053bc8..f482b994c608 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -30,28 +30,16 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md,
 				bool has_bti);
 
-#define arch_efi_call_virt_setup()				\
-({								\
-	efi_virtmap_load();					\
-	__efi_fpsimd_begin();					\
-	raw_spin_lock(&efi_rt_lock);				\
-})
-
 #undef arch_efi_call_virt
 #define arch_efi_call_virt(p, f, args...)			\
 	__efi_rt_asm_wrapper((p)->f, #f, args)
 
-#define arch_efi_call_virt_teardown()				\
-({								\
-	raw_spin_unlock(&efi_rt_lock);				\
-	__efi_fpsimd_end();					\
-	efi_virtmap_unload();					\
-})
-
-extern raw_spinlock_t efi_rt_lock;
 extern u64 *efi_rt_stack_top;
 efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
+
 /*
  * efi_rt_stack_top[-1] contains the value the stack pointer had before
  * switching to the EFI runtime stack.
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 8e5ffb58f83e..b7afaa026842 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -31,6 +31,13 @@
 .Lskip_hcrx_\@:
 .endm
 
+/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
+.macro __check_hvhe fail, tmp
+	mrs	\tmp, hcr_el2
+	and	\tmp, \tmp, #HCR_E2H
+	cbz	\tmp, \fail
+.endm
+
 /*
  * Allow Non-secure EL1 and EL0 to access physical timer and counter.
  * This is not necessary for VHE, since the host kernel runs in EL2,
@@ -43,9 +50,7 @@
  */
 .macro __init_el2_timers
 	mov	x0, #3			// Enable EL1 physical timers
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .LnVHE_\@
+	__check_hvhe .LnVHE_\@, x1
 	lsl	x0, x0, #10
 .LnVHE_\@:
 	msr	cnthctl_el2, x0
@@ -139,15 +144,14 @@
 
 /* Coprocessor traps */
 .macro __init_el2_cptr
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .LnVHE_\@
+	__check_hvhe .LnVHE_\@, x1
 	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
-	b	.Lset_cptr_\@
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_\@
 .LnVHE_\@:
 	mov	x0, #0x33ff
-.Lset_cptr_\@:
 	msr	cptr_el2, x0		// Disable copro. traps to EL2
+.Lskip_set_cptr_\@:
 .endm
 
 /* Disable any fine grained traps */
@@ -268,19 +272,19 @@
 	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
 .Linit_sve_\@:	/* SVE register access */
-	mrs	x0, cptr_el2		// Disable SVE traps
-	mrs	x1, hcr_el2
-	and	x1, x1, #HCR_E2H
-	cbz	x1, .Lcptr_nvhe_\@
+	__check_hvhe .Lcptr_nvhe_\@, x1
 
-	// VHE case
+	// (h)VHE case
+	mrs	x0, cpacr_el1		// Disable SVE traps
 	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
-	b	.Lset_cptr_\@
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_\@
 
 .Lcptr_nvhe_\@: // nVHE case
+	mrs	x0, cptr_el2		// Disable SVE traps
 	bic	x0, x0, #CPTR_EL2_TZ
-.Lset_cptr_\@:
 	msr	cptr_el2, x0
+.Lskip_set_cptr_\@:
 	isb
 	mov	x1, #ZCR_ELx_LEN_MASK	// SVE: Enable full vector
 	msr_s	SYS_ZCR_EL2, x1		// length for EL1.
@@ -289,9 +293,19 @@
 	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2
 .Linit_sme_\@:	/* SME register access and priority mapping */
+	__check_hvhe .Lcptr_nvhe_sme_\@, x1
+
+	// (h)VHE case
+	mrs	x0, cpacr_el1		// Disable SME traps
+	orr	x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
+	msr	cpacr_el1, x0
+	b	.Lskip_set_cptr_sme_\@
+
+.Lcptr_nvhe_sme_\@: // nVHE case
 	mrs	x0, cptr_el2		// Disable SME traps
 	bic	x0, x0, #CPTR_EL2_TSM
 	msr	cptr_el2, x0
+.Lskip_set_cptr_sme_\@:
 	isb
 
 	mrs	x1, sctlr_el2
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 67f2fb781f59..8df46f186c64 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -356,7 +356,7 @@ static inline int sme_max_virtualisable_vl(void)
 	return vec_max_virtualisable_vl(ARM64_VEC_SME);
 }
 
-extern void sme_alloc(struct task_struct *task);
+extern void sme_alloc(struct task_struct *task, bool flush);
 extern unsigned int sme_get_vl(void);
 extern int sme_set_current_vl(unsigned long arg);
 extern int sme_get_current_vl(void);
@@ -388,7 +388,7 @@ static inline void sme_smstart_sm(void) { }
 static inline void sme_smstop_sm(void) { }
 static inline void sme_smstop(void) { }
 
-static inline void sme_alloc(struct task_struct *task) { }
+static inline void sme_alloc(struct task_struct *task, bool flush) { }
 static inline void sme_setup(void) { }
 static inline unsigned int sme_get_vl(void) { return 0; }
 static inline int sme_max_vl(void) { return 0; }
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 21ac1c5c71d3..ab158196480c 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -211,6 +211,10 @@ static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs
 {
 	return ret_regs->fp;
 }
+
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer);
+
 #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
 
 #endif
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 692b1ec663b2..521267478d18 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -138,6 +138,7 @@
 #define KERNEL_HWCAP_SME_B16B16	__khwcap2_feature(SME_B16B16)
 #define KERNEL_HWCAP_SME_F16F16	__khwcap2_feature(SME_F16F16)
 #define KERNEL_HWCAP_MOPS	__khwcap2_feature(MOPS)
+#define KERNEL_HWCAP_HBC	__khwcap2_feature(HBC)
 
 /*
  * This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 577773870b66..85d26143faa5 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -118,31 +118,4 @@
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
 #endif
 
-/*
- * To make optimal use of block mappings when laying out the linear
- * mapping, round down the base of physical memory to a size that can
- * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
- * (64k granule), or a multiple that can be mapped using contiguous bits
- * in the page tables: 32 * PMD_SIZE (16k granule)
- */
-#if defined(CONFIG_ARM64_4K_PAGES)
-#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
-#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
-#else
-#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
-#endif
-
-/*
- * sparsemem vmemmap imposes an additional requirement on the alignment of
- * memstart_addr, due to the fact that the base of the vmemmap region
- * has a direct correspondence, and needs to appear sufficiently aligned
- * in the virtual address space.
- */
-#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
-#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
-#else
-#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
-#endif
-
 #endif	/* __ASM_KERNEL_PGTABLE_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 7d170aaa2db4..24e28bb2d95b 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -278,7 +278,7 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void);
 asmlinkage void kvm_unexpected_el2_exception(void);
 struct kvm_cpu_context;
 void handle_trap(struct kvm_cpu_context *host_ctxt);
-asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on);
+asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
 void __noreturn __pkvm_init_finalise(void);
 void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
 void kvm_patch_vector_branch(struct alt_instr *alt,
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index efc0b45d79c3..3d6725ff0bf6 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -571,6 +571,14 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
 	return test_bit(feature, vcpu->arch.features);
 }
 
+static __always_inline void kvm_write_cptr_el2(u64 val)
+{
+	if (has_vhe() || has_hvhe())
+		write_sysreg(val, cpacr_el1);
+	else
+		write_sysreg(val, cptr_el2);
+}
+
 static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 {
 	u64 val;
@@ -578,8 +586,16 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 	if (has_vhe()) {
 		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
 		       CPACR_EL1_ZEN_EL1EN);
+		if (cpus_have_final_cap(ARM64_SME))
+			val |= CPACR_EL1_SMEN_EL1EN;
 	} else if (has_hvhe()) {
 		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
+
+		if (!vcpu_has_sve(vcpu) ||
+		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
+		if (cpus_have_final_cap(ARM64_SME))
+			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
 	} else {
 		val = CPTR_NVHE_EL2_RES1;
 
@@ -597,9 +613,6 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
 {
 	u64 val = kvm_get_reset_cptr_el2(vcpu);
 
-	if (has_vhe() || has_hvhe())
-		write_sysreg(val, cpacr_el1);
-	else
-		write_sysreg(val, cptr_el2);
+	kvm_write_cptr_el2(val);
 }
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 8b6096753740..d3dd05bbfe23 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -727,6 +727,8 @@ struct kvm_vcpu_arch {
 #define DBG_SS_ACTIVE_PENDING	__vcpu_single_flag(sflags, BIT(5))
 /* PMUSERENR for the guest EL0 is on physical CPU */
 #define PMUSERENR_ON_CPU	__vcpu_single_flag(sflags, BIT(6))
+/* WFI instruction trapped */
+#define IN_WFI			__vcpu_single_flag(sflags, BIT(7))
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 8294a9a7e566..929d355eae0a 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
 
 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *					    flag in a page-table entry.
  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
+ * @size:	Size of the address range to visit.
+ * @mkold:	True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
 *
 * Note that it is the caller's responsibility to invalidate the TLB after
 * calling this function to ensure that the updated permissions are visible
 * to the CPUs.
 *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
 */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold);
 
 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -646,18 +650,6 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 				   enum kvm_pgtable_prot prot);
 
 /**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
-/**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *				      of Coherency for guest stage-2 address
  *				      range.
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 4384eaa0aeb7..94b68850cb9f 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -64,7 +64,6 @@ extern void arm64_memblock_init(void);
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
-extern void init_mem_pgprot(void);
 extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
 				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0bd18de9fd97..72c2e8431360 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -103,6 +103,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
+#define pte_rdonly(pte)		(!!(pte_val(pte) & PTE_RDONLY))
 #define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
@@ -120,7 +121,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys)
 	(__boundary - 1 < (end) - 1) ? __boundary : (end);	\
 })
 
-#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+#define pte_hw_dirty(pte)	(pte_write(pte) && !pte_rdonly(pte))
 #define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
@@ -212,7 +213,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 	 * clear), set the PTE_DIRTY bit.
 	 */
 	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
 
 	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
 	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -823,7 +824,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 		      PTE_ATTRINDX_MASK;
 	/* preserve the hardware dirty information */
 	if (pte_hw_dirty(pte))
-		pte = pte_mkdirty(pte);
+		pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));
+
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 3918f2a67970..e5bc54522e71 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -359,14 +359,6 @@ static inline void prefetchw(const void *ptr)
 	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
 }
 
-#define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *ptr)
-{
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-		     "prfm pstl1strm, %a0",
-		     "nop") : : "p" (ptr));
-}
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 4292d9bafb9d..484cb6972e99 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -17,6 +17,9 @@
 
 #include <asm/virt.h>
 
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_normal_event);
+DECLARE_PER_CPU(struct sdei_registered_event *, sdei_active_critical_event);
+
 extern unsigned long sdei_exit_mode;
 
 /* Software Delegated Exception entry point from firmware*/
@@ -29,6 +32,9 @@ asmlinkage void __sdei_asm_entry_trampoline(unsigned long event_num,
 					    unsigned long pc,
 					    unsigned long pstate);
 
+/* Abort a running handler. Context is discarded. */
+void __sdei_handler_abort(void);
+
 /*
  * The above entry point does the minimum to call C code. This function does
  * anything else, before calling the driver.
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 4cfe9b49709b..ab8e14b96f68 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -85,4 +85,7 @@ static inline int syscall_get_arch(struct task_struct *task)
 	return AUDIT_ARCH_AARCH64;
 }
 
+int syscall_trace_enter(struct pt_regs *regs);
+void syscall_trace_exit(struct pt_regs *regs);
+
 #endif	/* __ASM_SYSCALL_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b481935e9314..16464bf9a8aa 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -803,15 +803,21 @@
 /*
  * For registers without architectural names, or simply unsupported by
  * GAS.
+ *
+ * __check_r forces warnings to be generated by the compiler when
+ * evaluating r which wouldn't normally happen due to being passed to
+ * the assembler via __stringify(r).
  */
 #define read_sysreg_s(r) ({					\
	u64 __val;						\
+	u32 __maybe_unused __check_r = (u32)(r);		\
	asm volatile(__mrs_s("%0", r) : "=r" (__val));		\
	__val;							\
 })
 
 #define write_sysreg_s(v, r) do {				\
	u64 __val = (u64)(v);					\
+	u32 __maybe_unused __check_r = (u32)(r);		\
	asm volatile(__msr_s(r, "%x0") : : "rZ" (__val));	\
 } while (0)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 64a514f90131..bd77253b62e0 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -39,7 +39,7 @@
 #define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END	(__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls		452
+#define __NR_compat_syscalls		453
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index d952a28463e0..78b68311ec81 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -909,6 +909,8 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv)
 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
 #define __NR_cachestat 451
 __SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 5227db7640c8..261d6e9df2e1 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -78,6 +78,7 @@ extern u32 __boot_cpu_mode[2];
 
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 void __hyp_reset_vectors(void);
+bool is_kvm_arm_initialised(void);
 
 DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
diff --git a/arch/arm64/include/uapi/asm/bitsperlong.h b/arch/arm64/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..485d60bee26c
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_BITSPERLONG_H
+#define __ASM_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif	/* __ASM_BITSPERLONG_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index a2cac4305b1e..53026f45a509 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -103,5 +103,6 @@
 #define HWCAP2_SME_B16B16	(1UL << 41)
 #define HWCAP2_SME_F16F16	(1UL << 42)
 #define HWCAP2_MOPS		(1UL << 43)
+#define HWCAP2_HBC		(1UL << 44)
 
 #endif /* _UAPI__ASM_HWCAP_H */
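The HWCAP2_HBC bit added in the last hunk is exported to userspace through the auxiliary vector, like the other hwcaps. A minimal sketch of how a program could probe for it, assuming a libc that provides getauxval() and a kernel carrying this change (the fallback #define simply mirrors the value from the hunk above):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_HBC
	#define HWCAP2_HBC	(1UL << 44)	/* value from uapi/asm/hwcap.h above */
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		/* HBC: hinted conditional branches (the BC.cond instruction) */
		printf("HBC %ssupported\n", (hwcap2 & HWCAP2_HBC) ? "" : "not ");
		return 0;
	}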