From 16174eea2e4fe8247e04c17da682f2034fec0369 Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:35 +0000
Subject: KVM: arm64: Set up .hyp.rodata ELF section

We will need to recognize pointers in .rodata specific to hyp, so
establish a .hyp.rodata ELF section. Merge it with the existing
.hyp.data..ro_after_init as they are treated the same at runtime.

Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-3-dbrazdil@google.com
---
 arch/arm64/include/asm/sections.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 8ff579361731..a6f3557d1ab2 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -11,7 +11,7 @@ extern char __alt_instructions[], __alt_instructions_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
-extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_rodata_start[], __hyp_rodata_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
--
cgit v1.2.3

From f7a4825d9569593b9a81f0768313b86175691ef1 Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:36 +0000
Subject: KVM: arm64: Add symbol at the beginning of each hyp section

Generating hyp relocations will require referencing positions at a given
offset from the beginning of hyp sections. Since the final layout will
not be determined until the linking of `vmlinux`, modify the hyp linker
script to insert a symbol at the first byte of each hyp section to use
as an anchor. The linker of `vmlinux` will place the symbols together
with the sections.

Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-4-dbrazdil@google.com
---
 arch/arm64/include/asm/hyp_image.h | 29 +++++++++++++++++++++++++++--
 arch/arm64/kvm/hyp/nvhe/hyp.lds.S  |  4 ++--
 2 files changed, 29 insertions(+), 4 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h
index daa1a1da539e..737ded6b6d0d 100644
--- a/arch/arm64/include/asm/hyp_image.h
+++ b/arch/arm64/include/asm/hyp_image.h
@@ -7,6 +7,9 @@
 #ifndef __ARM64_HYP_IMAGE_H__
 #define __ARM64_HYP_IMAGE_H__
 
+#define __HYP_CONCAT(a, b)	a ## b
+#define HYP_CONCAT(a, b)	__HYP_CONCAT(a, b)
+
 /*
  * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
  * to separate it from the kernel proper.
@@ -21,9 +24,31 @@
  */
 #define HYP_SECTION_NAME(NAME)	.hyp##NAME
 
+/* Symbol defined at the beginning of each hyp section. */
+#define HYP_SECTION_SYMBOL_NAME(NAME) \
+	HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
+
+/*
+ * Helper to generate linker script statements starting a hyp section.
+ *
+ * A symbol with a well-known name is defined at the first byte. This
+ * is used as a base for hyp relocations (see gen-hyprel.c). It must
+ * be defined inside the section so the linker of `vmlinux` cannot
+ * separate it from the section data.
+ */
+#define BEGIN_HYP_SECTION(NAME) \
+	HYP_SECTION_NAME(NAME) : { \
+		HYP_SECTION_SYMBOL_NAME(NAME) = .;
+
+/* Helper to generate linker script statements ending a hyp section. */
+#define END_HYP_SECTION \
+	}
+
 /* Defines an ELF hyp section from input section @NAME and its subsections. */
-#define HYP_SECTION(NAME) \
-	HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+#define HYP_SECTION(NAME)		\
+	BEGIN_HYP_SECTION(NAME)		\
+		*(NAME NAME##.*)	\
+	END_HYP_SECTION
 
 /*
  * Defines a linker script alias of a kernel-proper symbol referenced by
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index cfdc59b4329b..cd119d82d8e3 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -22,7 +22,7 @@ SECTIONS {
	 * alignment for when linking into vmlinux.
	 */
	. = ALIGN(PAGE_SIZE);
-	HYP_SECTION_NAME(.data..percpu) : {
+	BEGIN_HYP_SECTION(.data..percpu)
		PERCPU_INPUT(L1_CACHE_BYTES)
-	}
+	END_HYP_SECTION
 }
--
cgit v1.2.3
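The two-level HYP_CONCAT above is what makes HYP_SECTION_SYMBOL_NAME work:
the inner HYP_SECTION_NAME(NAME) must be expanded before the paste happens.
A minimal stand-alone C illustration of the same pattern (the identifiers
are hypothetical; real hyp section names contain dots and are only pasted
when preprocessing linker scripts, not C):

    #include <stdio.h>

    /* Same two-level concatenation helpers as in hyp_image.h. */
    #define __HYP_CONCAT(a, b)  a ## b
    #define HYP_CONCAT(a, b)    __HYP_CONCAT(a, b)

    /* Hypothetical stand-in for HYP_SECTION_NAME(). */
    #define SECTION_NAME        text

    /* Expands to: int __hyp_section_text = 42; */
    int HYP_CONCAT(__hyp_section_, SECTION_NAME) = 42;

    int main(void)
    {
        /*
         * The extra level lets SECTION_NAME expand first; using
         * __HYP_CONCAT directly would paste the literal token and
         * define __hyp_section_SECTION_NAME instead.
         */
        printf("%d\n", __hyp_section_text);
        return 0;
    }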
From 6ec6259d7084ed32e164c9f7b69049464dd90fa5 Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:38 +0000
Subject: KVM: arm64: Apply hyp relocations at runtime

KVM nVHE code runs under a different VA mapping than the kernel, hence
it has so far avoided absolute addressing because the VA in a constant
pool is relocated by the linker to a kernel VA (see hyp_symbol_addr).

Now the kernel has access to a list of positions that contain a kimg VA
but will be accessed only in hyp execution context. These are generated
by the gen-hyprel build-time tool and stored in .hyp.reloc.

Add an early boot pass over the entries and convert the kimg VAs to hyp
VAs. Note that this requires the .hyp* ELF sections to be mapped
read-write at that point.

Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-6-dbrazdil@google.com
---
 arch/arm64/include/asm/kvm_mmu.h  |  1 +
 arch/arm64/include/asm/sections.h |  1 +
 arch/arm64/kernel/smp.c           |  4 +++-
 arch/arm64/kvm/va_layout.c        | 28 ++++++++++++++++++++++++++++
 4 files changed, 33 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e52d82aeadca..6bbb44011c84 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -129,6 +129,7 @@ alternative_cb_end
 void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+void kvm_apply_hyp_relocations(void);
 
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index a6f3557d1ab2..2f36b16a5b5d 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -12,6 +12,7 @@ extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
 extern char __hyp_rodata_start[], __hyp_rodata_end[];
+extern char __hyp_reloc_begin[], __hyp_reloc_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ad00f99ee9b0..357590beaabb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void)
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
-	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
		kvm_compute_layout();
+		kvm_apply_hyp_relocations();
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 70fcd6a12fe1..fee7dcd95d73 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void)
	init_hyp_physvirt_offset();
 }
 
+/*
+ * The .hyp.reloc ELF section contains a list of kimg positions that
+ * contain kimg VAs but will be accessed only in hyp execution context.
+ * Convert them to hyp VAs. See gen-hyprel.c for more details.
+ */
+__init void kvm_apply_hyp_relocations(void)
+{
+	int32_t *rel;
+	int32_t *begin = (int32_t *)__hyp_reloc_begin;
+	int32_t *end = (int32_t *)__hyp_reloc_end;
+
+	for (rel = begin; rel < end; ++rel) {
+		uintptr_t *ptr, kimg_va;
+
+		/*
+		 * Each entry contains a 32-bit relative offset from itself
+		 * to a kimg VA position.
+		 */
+		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);
+
+		/* Read the kimg VA value at the relocation address. */
+		kimg_va = *ptr;
+
+		/* Convert to hyp VA and store back to the relocation address. */
+		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
+	}
+}
+
 static u32 compute_instruction(int n, u32 rd, u32 rn)
 {
	u32 insn = AARCH64_BREAK_FAULT;
--
cgit v1.2.3
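The entry format is the key detail here: each .hyp.reloc entry is a 32-bit
offset from the entry itself to the position it patches, so the table stays
position-independent and needs no relocation of its own. A minimal userspace
sketch of the same arithmetic (all names are hypothetical; this mirrors the
loop above, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins: a position holding a "kimg VA" and its .hyp.reloc entry. */
    static uintptr_t patched_position = 0xdeadbeef;
    static int32_t reloc_entry;

    int main(void)
    {
        /* What a gen-hyprel-like tool would emit at build time:
         * the offset from the entry to the position it describes. */
        reloc_entry = (int32_t)((char *)&patched_position -
                                (char *)&reloc_entry);

        /* What the boot-time pass does: entry address + stored offset
         * recovers the position, which is then rewritten in place. */
        uintptr_t *ptr = (uintptr_t *)((char *)&reloc_entry + reloc_entry);

        printf("entry resolves to %p, position lives at %p\n",
               (void *)ptr, (void *)&patched_position);
        return 0;
    }

The kernel version additionally routes the write through lm_alias(), i.e.
the linear-map alias of the position, rather than the kernel-image mapping.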
From 97cbd2fc0257c6af7036a9a6415ca8ad43535d6b Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:39 +0000
Subject: KVM: arm64: Fix constant-pool users in hyp

Hyp code uses absolute addressing to obtain a kimg VA of a small number
of kernel symbols. Since the kernel now converts constant pool addresses
to hyp VAs, this trick does not work anymore. Change the helpers to
convert from hyp VA back to kimg VA or PA, as needed, and rework the
callers accordingly.

Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-7-dbrazdil@google.com
---
 arch/arm64/include/asm/kvm_mmu.h   | 42 +++++++++++++++-----------------
 arch/arm64/kvm/hyp/nvhe/host.S     | 29 +++++++++++++-------------
 arch/arm64/kvm/hyp/nvhe/hyp-init.S |  2 --
 3 files changed, 31 insertions(+), 42 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6bbb44011c84..adadc468cc71 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -73,49 +73,39 @@ alternative_cb_end
 .endm
 
 /*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
  * tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_get_kimage_voffset uses the
- * specific registers encoded in the instructions).
  */
-.macro kimg_pa reg, tmp
-alternative_cb kvm_get_kimage_voffset
-	movz	\tmp, #0
-	movk	\tmp, #0, lsl #16
-	movk	\tmp, #0, lsl #32
-	movk	\tmp, #0, lsl #48
-alternative_cb_end
-
-	/* reg = __pa(reg) */
-	sub	\reg, \reg, \tmp
+.macro hyp_pa reg, tmp
+	ldr_l	\tmp, hyp_physvirt_offset
+	add	\reg, \reg, \tmp
 .endm
 
 /*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
  * tmp: temporary register
  *
  * The actual code generation takes place in kvm_get_kimage_voffset, and
  * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
+ * perform the register allocation (kvm_get_kimage_voffset uses the
  * specific registers encoded in the instructions).
  */
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
+.macro hyp_kimg_va reg, tmp
+	/* Convert hyp VA -> PA. */
+	hyp_pa	\reg, \tmp
+
+	/* Load kimage_voffset. */
+alternative_cb kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
 alternative_cb_end
 
-	sub	\reg, \reg, \tmp
-	mov_q	\tmp, PAGE_OFFSET
-	orr	\reg, \reg, \tmp
-	kern_hyp_va \reg
+	/* Convert PA -> kimg VA. */
+	add	\reg, \reg, \tmp
 .endm
 
 #else
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index a820dfdc9c25..6585a7cbbc56 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
  * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
  */
 SYM_FUNC_START(__hyp_do_panic)
-	/* Load the format arguments into x1-7 */
-	mov	x6, x3
-	get_vcpu_ptr	x7, x3
-
-	mrs	x3, esr_el2
-	mrs	x4, far_el2
-	mrs	x5, hpfar_el2
-
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
+	hyp_kimg_va lr, x6
	msr	elr_el2, lr
 
-	/*
-	 * Set the panic format string and enter the host, conditionally
-	 * restoring the host context.
-	 */
+	/* Set the panic format string. Use the, now free, LR as scratch. */
+	ldr	lr, =__hyp_panic_string
+	hyp_kimg_va lr, x6
+
+	/* Load the format arguments into x1-7. */
+	mov	x6, x3
+	get_vcpu_ptr	x7, x3
+	mrs	x3, esr_el2
+	mrs	x4, far_el2
+	mrs	x5, hpfar_el2
+
+	/* Enter the host, conditionally restoring the host context. */
	cmp	x0, xzr
-	ldr	x0, =__hyp_panic_string
+	mov	x0, lr
	b.eq	__host_enter_without_restoring
	b	__host_enter_for_panic
 SYM_FUNC_END(__hyp_do_panic)
@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
-	kimg_pa x5, x6
+	hyp_pa	x5, x6
	br	x5
 .L__vect_end\@:
 .if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 68fd64f2313e..99b408fe09ee 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -139,7 +139,6 @@ alternative_else_nop_endif
 
	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
-	kimg_hyp_va x0, x1
	msr	vbar_el2, x0
 
	ret
@@ -198,7 +197,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
-	kimg_hyp_va x1, x2
	br	x1
 SYM_CODE_END(__kvm_hyp_init_cpu)
--
cgit v1.2.3
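Both new macros reduce to plain offset arithmetic: hyp_pa adds
hyp_physvirt_offset to a hyp VA, and hyp_kimg_va then adds kimage_voffset
to the resulting PA. A C sketch of the same conversions with made-up
offsets (the real values are computed at boot; unsigned wraparound is
intended and mirrors the register arithmetic):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical offsets standing in for the boot-time values. */
    static const uint64_t hyp_physvirt_offset = 0x0000a04000000000ULL;
    static const uint64_t kimage_voffset      = 0xffff800010000000ULL;

    /* Mirrors: ldr_l tmp, hyp_physvirt_offset; add reg, reg, tmp */
    static uint64_t hyp_pa(uint64_t hyp_va)
    {
        return hyp_va + hyp_physvirt_offset;
    }

    /* Mirrors: hyp_pa, then the patched-in add of kimage_voffset. */
    static uint64_t hyp_kimg_va(uint64_t hyp_va)
    {
        return hyp_pa(hyp_va) + kimage_voffset;
    }

    int main(void)
    {
        uint64_t va = 0x0000200012345678ULL;    /* fake hyp VA */

        printf("hyp VA  0x%016llx\n", (unsigned long long)va);
        printf("PA      0x%016llx\n", (unsigned long long)hyp_pa(va));
        printf("kimg VA 0x%016llx\n", (unsigned long long)hyp_kimg_va(va));
        return 0;
    }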
From 537db4af26e3f2e0f304f2032bc593f7e2a54938 Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:40 +0000
Subject: KVM: arm64: Remove patching of fn pointers in hyp

Storing a function pointer in hyp now generates relocation information
used at early boot to convert the address to hyp VA. The existing
alternative-based conversion mechanism is therefore obsolete. Remove it
and simplify its users.

Acked-by: Ard Biesheuvel
Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-8-dbrazdil@google.com
---
 arch/arm64/include/asm/kvm_mmu.h   | 18 ------------------
 arch/arm64/kernel/image-vars.h     |  1 -
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 11 ++++-------
 arch/arm64/kvm/va_layout.c         |  6 ------
 4 files changed, 4 insertions(+), 32 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index adadc468cc71..90873851f677 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -135,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
-{
-	unsigned long offset;
-
-	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
-				    "movk %0, #0, lsl #16\n"
-				    "movk %0, #0, lsl #32\n"
-				    "movk %0, #0, lsl #48\n",
-				    kvm_update_kimg_phys_offset)
-		     : "=r" (offset));
-
-	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
-}
-
-#define kimg_fn_hyp_va(v) 	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
-
-#define kimg_fn_ptr(x)	(typeof(x) **)(x)
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index f676243abac6..23f1a557bd9f 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -64,7 +64,6 @@ __efistub__ctype		= _ctype;
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
-KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 
 /* Global kernel state accessed by nVHE hyp code. */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a906f9e2ff34..f012f8665ecc 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
 
 typedef void (*hcall_t)(struct kvm_cpu_context *);
 
-#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
+#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
 
-static const hcall_t *host_hcall[] = {
+static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
@@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = {
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
 {
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
-	const hcall_t *kfn;
	hcall_t hfn;
 
	id -= KVM_HOST_SMCCC_ID(0);
@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;
 
-	kfn = host_hcall[id];
-	if (unlikely(!kfn))
+	hfn = host_hcall[id];
+	if (unlikely(!hfn))
		goto inval;
 
	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;
-
-	hfn = kimg_fn_hyp_va(kfn);
	hfn(host_ctxt);
 
	return;
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index fee7dcd95d73..978301392d67 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -283,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
	*updptr++ = cpu_to_le32(insn);
 }
 
-void kvm_update_kimg_phys_offset(struct alt_instr *alt,
-				 __le32 *origptr, __le32 *updptr, int nr_inst)
-{
-	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
-}
-
 void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
 {
--
cgit v1.2.3
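With the constant pools fixed up before any of this runs, the dispatch
table can hold ordinary function pointers again. A reduced sketch of the
resulting pattern (the context struct and handlers are stand-ins, not the
kernel's types):

    #include <stdio.h>

    struct cpu_context { unsigned long regs[4]; };

    typedef void (*hcall_t)(struct cpu_context *);

    static void handle_vcpu_run(struct cpu_context *ctxt) { puts("vcpu_run"); }
    static void handle_flush_vm(struct cpu_context *ctxt) { puts("flush_vm"); }

    /*
     * Entries are stored and called directly; before this change the
     * table held kimg addresses that had to pass through
     * kimg_fn_hyp_va() on every call.
     */
    static const hcall_t host_hcall[] = {
        handle_vcpu_run,
        handle_flush_vm,
    };

    static void handle_host_hcall(unsigned long id, struct cpu_context *ctxt)
    {
        hcall_t hfn;

        if (id >= sizeof(host_hcall) / sizeof(host_hcall[0]))
            return;

        hfn = host_hcall[id];
        if (hfn)
            hfn(ctxt);
    }

    int main(void)
    {
        struct cpu_context ctxt = { { 0 } };

        handle_host_hcall(0, &ctxt);
        handle_host_hcall(1, &ctxt);
        return 0;
    }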
From 247bc166e6b3b1e4068f120f55582a3aa210cc2d Mon Sep 17 00:00:00 2001
From: David Brazdil
Date: Tue, 5 Jan 2021 18:05:41 +0000
Subject: KVM: arm64: Remove hyp_symbol_addr

Hyp code used the hyp_symbol_addr helper to force PC-relative addressing
because absolute addressing results in kernel VAs due to the way hyp code
is linked. This is not true anymore, so remove the helper and update all
of its users.

Acked-by: Ard Biesheuvel
Signed-off-by: David Brazdil
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210105180541.65031-9-dbrazdil@google.com
---
 arch/arm64/include/asm/kvm_asm.h         | 26 --------------------------
 arch/arm64/kvm/hyp/include/hyp/switch.h  |  4 ++--
 arch/arm64/kvm/hyp/nvhe/hyp-smp.c        |  4 ++--
 arch/arm64/kvm/hyp/nvhe/psci-relay.c     | 24 ++++++++++++------------
 arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c |  2 +-
 5 files changed, 17 insertions(+), 43 deletions(-)

(limited to 'arch/arm64/include')

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 8a33d83ea843..22d933e9b59e 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define SYM_CONSTRAINT "i"
-#else
-#define SYM_CONSTRAINT "S"
-#endif
-
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
-		addr;							\
-	})
-
 #define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 84473574c2e7..54f4860cd87c 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);
 
-	entry = hyp_symbol_addr(__start___kvm_ex_table);
-	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	entry = &__start___kvm_ex_table;
+	end = &__stop___kvm_ex_table;
 
	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 2997aa156d8e..879559057dee 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
		hyp_panic();
 
-	cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
+	cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
-	elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
+	elf_base = (unsigned long)&__per_cpu_start;
	return this_cpu_base - elf_base;
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index e3947846ffcb..f254a425cb3a 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -134,8 +134,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;
 
-	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
-	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
+	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
+	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
 
	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
@@ -146,7 +146,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
	wmb();
 
	ret = psci_call(func_id, mpidr,
-			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
+			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));
 
	/* If successful, the lock will be released by the target CPU. */
@@ -165,8 +165,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
 
-	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+	boot_args = this_cpu_ptr(&suspend_args);
+	init_params = this_cpu_ptr(&kvm_init_params);
 
	/*
	 * No need to acquire a lock before writing to boot_args because a core
@@ -180,7 +180,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
-			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
 }
 
@@ -192,8 +192,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
 
-	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+	boot_args = this_cpu_ptr(&suspend_args);
+	init_params = this_cpu_ptr(&kvm_init_params);
 
	/*
	 * No need to acquire a lock before writing to boot_args because a core
@@ -204,7 +204,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
 
	/* Will only return on error. */
	return psci_call(func_id,
-			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
 }
 
@@ -213,12 +213,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
+	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 
	if (is_cpu_on)
-		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
+		boot_args = this_cpu_ptr(&cpu_on_args);
	else
-		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
+		boot_args = this_cpu_ptr(&suspend_args);
 
	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 8f0585640241..87a54375bd6e 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
	}
 
	rd = kvm_vcpu_dabt_get_rd(vcpu);
-	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
+	addr  = kvm_vgic_global_state.vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;
 
	if (kvm_vcpu_dabt_iswrite(vcpu)) {
--
cgit v1.2.3
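For reference, here is the kind of PC-relative computation the deleted
helper produced, as a stand-alone sketch (arm64-only; the macro body is
copied from the removal above with the pre-GCC-5 constraint fallback
dropped, and the symbol is hypothetical):

    #include <stdio.h>

    static int some_symbol;

    /* Compute a symbol's address with an adrp/add pair, so the result
     * is derived from the PC rather than loaded from a constant pool. */
    #define pc_rel_addr(s)					\
        ({							\
            typeof(s) *__addr;					\
            asm("adrp	%0, %1\n"				\
                "add	%0, %0, :lo12:%1\n"			\
                : "=r" (__addr) : "S" (&s));			\
            __addr;						\
        })

    int main(void)
    {
        /* Under a single mapping the two agree; hyp code needed the
         * PC-relative form only while its constant pools held kernel
         * VAs, which the runtime relocation pass now rewrites. */
        printf("PC-relative: %p  absolute: %p\n",
               (void *)pc_rel_addr(some_symbol), (void *)&some_symbol);
        return 0;
    }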