Diffstat (limited to 'arch/x86/boot/compressed')
-rw-r--r--  arch/x86/boot/compressed/Makefile      |  12
-rw-r--r--  arch/x86/boot/compressed/efi_mixed.S   | 341
-rw-r--r--  arch/x86/boot/compressed/head_64.S     | 110
-rw-r--r--  arch/x86/boot/compressed/la57toggle.S  | 112
-rw-r--r--  arch/x86/boot/compressed/misc.c        |  14
-rw-r--r--  arch/x86/boot/compressed/vmlinux.lds.S |   2
6 files changed, 121 insertions(+), 470 deletions(-)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 606c74f27459..fdbce022db55 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -38,7 +38,6 @@ KBUILD_CFLAGS += -fno-stack-protector
 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
 KBUILD_CFLAGS += -Wno-pointer-sign
-KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 # Disable relocation relaxation in case the link is not PIE.
@@ -98,6 +97,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+	vmlinux-objs-y += $(obj)/la57toggle.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
@@ -105,7 +105,6 @@ vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o $(obj)/td
 vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
 
 vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
-vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
 vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 $(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
@@ -117,9 +116,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 
 targets += $(patsubst $(obj)/%,%,$(vmlinux-objs-y)) vmlinux.bin.all vmlinux.relocs
 
-# vmlinux.relocs is created by the vmlinux postlink step.
-$(obj)/vmlinux.relocs: vmlinux
-	@true
+CMD_RELOCS = arch/x86/tools/relocs
+quiet_cmd_relocs = RELOCS  $@
+      cmd_relocs = $(CMD_RELOCS) $< > $@;$(CMD_RELOCS) --abs-relocs $<
+
+$(obj)/vmlinux.relocs: vmlinux.unstripped FORCE
+	$(call if_changed,relocs)
 
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
 vmlinux.bin.all-$(CONFIG_X86_NEED_RELOCS) += $(obj)/vmlinux.relocs
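A note on the new rule above: the recipe runs the relocs tool twice. The first invocation writes the relocation table to vmlinux.relocs (the > $@ redirection); the second pass, with --abs-relocs, has the tool inspect the absolute relocations, which appears to serve as a sanity check (see arch/x86/tools/relocs for the authoritative behavior). The vmlinux.unstripped prerequisite means the relocations are now extracted from the unstripped link as a normal build step, rather than by the vmlinux postlink step the removed comment referred to.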
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
deleted file mode 100644
index 876fc6d46a13..000000000000
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ /dev/null
@@ -1,341 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
- *
- * Early support for invoking 32-bit EFI services from a 64-bit kernel.
- *
- * Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT and IDT before we make EFI service
- * calls.
- *
- * On the plus side, we don't have to worry about mangling 64-bit
- * addresses into 32-bits because we're executing with an identity
- * mapped pagetable and haven't transitioned to 64-bit virtual addresses
- * yet.
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/msr.h>
-#include <asm/page_types.h>
-#include <asm/processor-flags.h>
-#include <asm/segment.h>
-#include <asm/setup.h>
-
-	.code64
-	.text
-/*
- * When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
- * is the first thing that runs after switching to long mode. Depending on
- * whether the EFI handover protocol or the compat entry point was used to
- * enter the kernel, it will either branch to the common 64-bit EFI stub
- * entrypoint efi_stub_entry() directly, or via the 64-bit EFI PE/COFF
- * entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
- * struct bootparams pointer as the third argument, so the presence of such a
- * pointer is used to disambiguate.
- *
- *                                                             +--------------+
- *  +------------------+     +------------+            +------>| efi_pe_entry |
- *  | efi32_pe_entry   |---->|            |            |       +-----------+--+
- *  +------------------+     |            |     +------+----------------+  |
- *                           | startup_32 |---->| startup_64_mixed_mode |  |
- *  +------------------+     |            |     +------+----------------+  |
- *  | efi32_stub_entry |---->|            |            |                   |
- *  +------------------+     +------------+            |                   |
- *                                                     V                   |
- *                           +------------+     +----------------+         |
- *                           | startup_64 |<----| efi_stub_entry |<--------+
- *                           +------------+     +----------------+
- */
-SYM_FUNC_START(startup_64_mixed_mode)
-	lea	efi32_boot_args(%rip), %rdx
-	mov	0(%rdx), %edi
-	mov	4(%rdx), %esi
-
-	/* Switch to the firmware's stack */
-	movl	efi32_boot_sp(%rip), %esp
-	andl	$~7, %esp
-
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-	mov	8(%rdx), %edx		// saved bootparams pointer
-	test	%edx, %edx
-	jnz	efi_stub_entry
-#endif
-	/*
-	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
-	 * shadow space on the stack even if all arguments are passed in
-	 * registers. We also need an additional 8 bytes for the space that
-	 * would be occupied by the return address, and this also results in
-	 * the correct stack alignment for entry.
-	 */
-	sub	$40, %rsp
-	mov	%rdi, %rcx		// MS calling convention
-	mov	%rsi, %rdx
-	jmp	efi_pe_entry
-SYM_FUNC_END(startup_64_mixed_mode)
-
-SYM_FUNC_START(__efi64_thunk)
-	push	%rbp
-	push	%rbx
-
-	movl	%ds, %eax
-	push	%rax
-	movl	%es, %eax
-	push	%rax
-	movl	%ss, %eax
-	push	%rax
-
-	/* Copy args passed on stack */
-	movq	0x30(%rsp), %rbp
-	movq	0x38(%rsp), %rbx
-	movq	0x40(%rsp), %rax
-
-	/*
-	 * Convert x86-64 ABI params to i386 ABI
-	 */
-	subq	$64, %rsp
-	movl	%esi, 0x0(%rsp)
-	movl	%edx, 0x4(%rsp)
-	movl	%ecx, 0x8(%rsp)
-	movl	%r8d, 0xc(%rsp)
-	movl	%r9d, 0x10(%rsp)
-	movl	%ebp, 0x14(%rsp)
-	movl	%ebx, 0x18(%rsp)
-	movl	%eax, 0x1c(%rsp)
-
-	leaq	0x20(%rsp), %rbx
-	sgdt	(%rbx)
-	sidt	16(%rbx)
-
-	leaq	1f(%rip), %rbp
-
-	/*
-	 * Switch to IDT and GDT with 32-bit segments. These are the firmware
-	 * GDT and IDT that were installed when the kernel started executing.
-	 * The pointers were saved by the efi32_entry() routine below.
-	 *
-	 * Pass the saved DS selector to the 32-bit code, and use far return to
-	 * restore the saved CS selector.
-	 */
-	lidt	efi32_boot_idt(%rip)
-	lgdt	efi32_boot_gdt(%rip)
-
-	movzwl	efi32_boot_ds(%rip), %edx
-	movzwq	efi32_boot_cs(%rip), %rax
-	pushq	%rax
-	leaq	efi_enter32(%rip), %rax
-	pushq	%rax
-	lretq
-
-1:	addq	$64, %rsp
-	movq	%rdi, %rax
-
-	pop	%rbx
-	movl	%ebx, %ss
-	pop	%rbx
-	movl	%ebx, %es
-	pop	%rbx
-	movl	%ebx, %ds
-	/* Clear out 32-bit selector from FS and GS */
-	xorl	%ebx, %ebx
-	movl	%ebx, %fs
-	movl	%ebx, %gs
-
-	pop	%rbx
-	pop	%rbp
-	RET
-SYM_FUNC_END(__efi64_thunk)
-
-	.code32
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-SYM_FUNC_START(efi32_stub_entry)
-	call	1f
-1:	popl	%ecx
-	leal	(efi32_boot_args - 1b)(%ecx), %ebx
-
-	/* Clear BSS */
-	xorl	%eax, %eax
-	leal	(_bss - 1b)(%ecx), %edi
-	leal	(_ebss - 1b)(%ecx), %ecx
-	subl	%edi, %ecx
-	shrl	$2, %ecx
-	cld
-	rep	stosl
-
-	add	$0x4, %esp		/* Discard return address */
-	popl	%ecx
-	popl	%edx
-	popl	%esi
-	movl	%esi, 8(%ebx)
-	jmp	efi32_entry
-SYM_FUNC_END(efi32_stub_entry)
-#endif
-
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-SYM_FUNC_START_LOCAL(efi_enter32)
-	/* Load firmware selector into data and stack segment registers */
-	movl	%edx, %ds
-	movl	%edx, %es
-	movl	%edx, %fs
-	movl	%edx, %gs
-	movl	%edx, %ss
-
-	/* Reload pgtables */
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/* Disable long mode via EFER */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btrl	$_EFER_LME, %eax
-	wrmsr
-
-	call	*%edi
-
-	/* We must preserve return value */
-	movl	%eax, %edi
-
-	/*
-	 * Some firmware will return with interrupts enabled. Be sure to
-	 * disable them before we switch GDTs and IDTs.
-	 */
-	cli
-
-	lidtl	16(%ebx)
-	lgdtl	(%ebx)
-
-	movl	%cr4, %eax
-	btsl	$(X86_CR4_PAE_BIT), %eax
-	movl	%eax, %cr4
-
-	movl	%cr3, %eax
-	movl	%eax, %cr3
-
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btsl	$_EFER_LME, %eax
-	wrmsr
-
-	xorl	%eax, %eax
-	lldt	%ax
-
-	pushl	$__KERNEL_CS
-	pushl	%ebp
-
-	/* Enable paging */
-	movl	%cr0, %eax
-	btsl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-	lret
-SYM_FUNC_END(efi_enter32)
-
-/*
- * This is the common EFI stub entry point for mixed mode.
- *
- * Arguments:	%ecx	image handle
- *		%edx	EFI system table pointer
- *
- * Since this is the point of no return for ordinary execution, no registers
- * are considered live except for the function parameters. [Note that the EFI
- * stub may still exit and return to the firmware using the Exit() EFI boot
- * service.]
- */
-SYM_FUNC_START_LOCAL(efi32_entry)
-	call	1f
-1:	pop	%ebx
-
-	/* Save firmware GDTR and code/data selectors */
-	sgdtl	(efi32_boot_gdt - 1b)(%ebx)
-	movw	%cs, (efi32_boot_cs - 1b)(%ebx)
-	movw	%ds, (efi32_boot_ds - 1b)(%ebx)
-
-	/* Store firmware IDT descriptor */
-	sidtl	(efi32_boot_idt - 1b)(%ebx)
-
-	/* Store firmware stack pointer */
-	movl	%esp, (efi32_boot_sp - 1b)(%ebx)
-
-	/* Store boot arguments */
-	leal	(efi32_boot_args - 1b)(%ebx), %ebx
-	movl	%ecx, 0(%ebx)
-	movl	%edx, 4(%ebx)
-	movb	$0x0, 12(%ebx)		// efi_is64
-
-	/*
-	 * Allocate some memory for a temporary struct boot_params, which only
-	 * needs the minimal pieces that startup_32() relies on.
-	 */
-	subl	$PARAM_SIZE, %esp
-	movl	%esp, %esi
-	movl	$PAGE_SIZE, BP_kernel_alignment(%esi)
-	movl	$_end - 1b, BP_init_size(%esi)
-	subl	$startup_32 - 1b, BP_init_size(%esi)
-
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	jmp	startup_32
-SYM_FUNC_END(efi32_entry)
-
-/*
- * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
- *			       efi_system_table_32_t *sys_table)
- */
-SYM_FUNC_START(efi32_pe_entry)
-	pushl	%ebp
-	movl	%esp, %ebp
-	pushl	%ebx			// save callee-save registers
-	pushl	%edi
-
-	call	verify_cpu		// check for long mode support
-	testl	%eax, %eax
-	movl	$0x80000003, %eax	// EFI_UNSUPPORTED
-	jnz	2f
-
-	movl	8(%ebp), %ecx		// image_handle
-	movl	12(%ebp), %edx		// sys_table
-	jmp	efi32_entry		// pass %ecx, %edx
-					// no other registers remain live
-
-2:	popl	%edi			// restore callee-save registers
-	popl	%ebx
-	leave
-	RET
-SYM_FUNC_END(efi32_pe_entry)
-
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-	.org	efi32_stub_entry + 0x200
-	.code64
-SYM_FUNC_START_NOALIGN(efi64_stub_entry)
-	jmp	efi_handover_entry
-SYM_FUNC_END(efi64_stub_entry)
-#endif
-
-	.data
-	.balign	8
-SYM_DATA_START_LOCAL(efi32_boot_gdt)
-	.word	0
-	.quad	0
-SYM_DATA_END(efi32_boot_gdt)
-
-SYM_DATA_START_LOCAL(efi32_boot_idt)
-	.word	0
-	.quad	0
-SYM_DATA_END(efi32_boot_idt)
-
-SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
-SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
-SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
-SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
-SYM_DATA(efi_is64, .byte 1)
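The dispatch described in the deleted file's header comment is compact enough to restate. Below is a minimal, self-contained C sketch of what startup_64_mixed_mode() decides; the function name with the _sketch suffix is hypothetical, the parameters mirror the three values saved in efi32_boot_args, and this is an illustration of the deleted assembly, not kernel code:

	struct boot_params;

	/* Real entry points in the EFI stub; declared here only for the sketch. */
	extern void efi_stub_entry(void *handle, void *systab, struct boot_params *bp);
	extern void efi_pe_entry(void *handle, void *systab);

	/* Hypothetical C rendering of the deleted startup_64_mixed_mode(). */
	static void startup_64_mixed_mode_sketch(void *handle, void *systab,
						 struct boot_params *bp)
	{
		/*
		 * Only the EFI handover protocol path stores a boot_params
		 * pointer in efi32_boot_args, so its presence selects
		 * efi_stub_entry().
		 */
		if (bp) {
			efi_stub_entry(handle, systab, bp);	/* does not return */
			return;
		}

		/* Compat PE entry: efi_pe_entry() builds boot_params itself. */
		efi_pe_entry(handle, systab);
	}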
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 1dcb794c5479..eafd4f185e77 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -263,13 +263,6 @@ SYM_FUNC_START(startup_32)
 	 * used to perform that far jump.
 	 */
 	leal	rva(startup_64)(%ebp), %eax
-#ifdef CONFIG_EFI_MIXED
-	cmpb	$1, rva(efi_is64)(%ebp)
-	je	1f
-	leal	rva(startup_64_mixed_mode)(%ebp), %eax
-1:
-#endif
-
 	pushl	$__KERNEL_CS
 	pushl	%eax
 
@@ -483,110 +476,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	jmp	*%rax
 SYM_FUNC_END(.Lrelocated)
 
-/*
- * This is the 32-bit trampoline that will be copied over to low memory. It
- * will be called using the ordinary 64-bit calling convention from code
- * running in 64-bit mode.
- *
- * Return address is at the top of the stack (might be above 4G).
- * The first argument (EDI) contains the address of the temporary PGD level
- * page table in 32-bit addressable memory which will be programmed into
- * register CR3.
- */
-	.section ".rodata", "a", @progbits
-SYM_CODE_START(trampoline_32bit_src)
-	/*
-	 * Preserve callee save 64-bit registers on the stack: this is
-	 * necessary because the architecture does not guarantee that GPRs will
-	 * retain their full 64-bit values across a 32-bit mode switch.
-	 */
-	pushq	%r15
-	pushq	%r14
-	pushq	%r13
-	pushq	%r12
-	pushq	%rbp
-	pushq	%rbx
-
-	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
-	movq	%rsp, %rbx
-	shrq	$32, %rbx
-
-	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
-	pushq	$__KERNEL32_CS
-	leaq	0f(%rip), %rax
-	pushq	%rax
-	lretq
-
-	/*
-	 * The 32-bit code below will do a far jump back to long mode and end
-	 * up here after reconfiguring the number of paging levels. First, the
-	 * stack pointer needs to be restored to its full 64-bit value before
-	 * the callee save register contents can be popped from the stack.
-	 */
-.Lret:
-	shlq	$32, %rbx
-	orq	%rbx, %rsp
-
-	/* Restore the preserved 64-bit registers */
-	popq	%rbx
-	popq	%rbp
-	popq	%r12
-	popq	%r13
-	popq	%r14
-	popq	%r15
-	retq
-
-	.code32
-0:
-	/* Disable paging */
-	movl	%cr0, %eax
-	btrl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/* Point CR3 to the trampoline's new top level page table */
-	movl	%edi, %cr3
-
-	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
-	movl	$MSR_EFER, %ecx
-	rdmsr
-	btsl	$_EFER_LME, %eax
-	/* Avoid writing EFER if no change was made (for TDX guest) */
-	jc	1f
-	wrmsr
-1:
-	/* Toggle CR4.LA57 */
-	movl	%cr4, %eax
-	btcl	$X86_CR4_LA57_BIT, %eax
-	movl	%eax, %cr4
-
-	/* Enable paging again. */
-	movl	%cr0, %eax
-	btsl	$X86_CR0_PG_BIT, %eax
-	movl	%eax, %cr0
-
-	/*
-	 * Return to the 64-bit calling code using LJMP rather than LRET, to
-	 * avoid the need for a 32-bit addressable stack. The destination
-	 * address will be adjusted after the template code is copied into a
-	 * 32-bit addressable buffer.
-	 */
-.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
-SYM_CODE_END(trampoline_32bit_src)
-
-/*
- * This symbol is placed right after trampoline_32bit_src() so its address can
- * be used to infer the size of the trampoline code.
- */
-SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
-
-	/*
-	 * The trampoline code has a size limit.
-	 * Make sure we fail to compile if the trampoline code grows
-	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
-	 */
-	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
-
-	.text
 SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
 	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
 1:
diff --git a/arch/x86/boot/compressed/la57toggle.S b/arch/x86/boot/compressed/la57toggle.S
new file mode 100644
index 000000000000..9ee002387eb1
--- /dev/null
+++ b/arch/x86/boot/compressed/la57toggle.S
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/boot.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+#include "pgtable.h"
+
+/*
+ * This is the 32-bit trampoline that will be copied over to low memory. It
+ * will be called using the ordinary 64-bit calling convention from code
+ * running in 64-bit mode.
+ *
+ * Return address is at the top of the stack (might be above 4G).
+ * The first argument (EDI) contains the address of the temporary PGD level
+ * page table in 32-bit addressable memory which will be programmed into
+ * register CR3.
+ */
+
+	.section ".rodata", "a", @progbits
+SYM_CODE_START(trampoline_32bit_src)
+	/*
+	 * Preserve callee save 64-bit registers on the stack: this is
+	 * necessary because the architecture does not guarantee that GPRs will
+	 * retain their full 64-bit values across a 32-bit mode switch.
+	 */
+	pushq	%r15
+	pushq	%r14
+	pushq	%r13
+	pushq	%r12
+	pushq	%rbp
+	pushq	%rbx
+
+	/* Preserve top half of RSP in a legacy mode GPR to avoid truncation */
+	movq	%rsp, %rbx
+	shrq	$32, %rbx
+
+	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+	pushq	$__KERNEL32_CS
+	leaq	0f(%rip), %rax
+	pushq	%rax
+	lretq
+
+	/*
+	 * The 32-bit code below will do a far jump back to long mode and end
+	 * up here after reconfiguring the number of paging levels. First, the
+	 * stack pointer needs to be restored to its full 64-bit value before
+	 * the callee save register contents can be popped from the stack.
+	 */
+.Lret:
+	shlq	$32, %rbx
+	orq	%rbx, %rsp
+
+	/* Restore the preserved 64-bit registers */
+	popq	%rbx
+	popq	%rbp
+	popq	%r12
+	popq	%r13
+	popq	%r14
+	popq	%r15
+	retq
+
+	.code32
+0:
+	/* Disable paging */
+	movl	%cr0, %eax
+	btrl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/* Point CR3 to the trampoline's new top level page table */
+	movl	%edi, %cr3
+
+	/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	btsl	$_EFER_LME, %eax
+	/* Avoid writing EFER if no change was made (for TDX guest) */
+	jc	1f
+	wrmsr
+1:
+	/* Toggle CR4.LA57 */
+	movl	%cr4, %eax
+	btcl	$X86_CR4_LA57_BIT, %eax
+	movl	%eax, %cr4
+
+	/* Enable paging again. */
+	movl	%cr0, %eax
+	btsl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/*
+	 * Return to the 64-bit calling code using LJMP rather than LRET, to
+	 * avoid the need for a 32-bit addressable stack. The destination
+	 * address will be adjusted after the template code is copied into a
+	 * 32-bit addressable buffer.
+	 */
+.Ljmp:	ljmpl	$__KERNEL_CS, $(.Lret - trampoline_32bit_src)
+SYM_CODE_END(trampoline_32bit_src)
+
+/*
+ * This symbol is placed right after trampoline_32bit_src() so its address can
+ * be used to infer the size of the trampoline code.
+ */
+SYM_DATA(trampoline_ljmp_imm_offset, .word .Ljmp + 1 - trampoline_32bit_src)
+
+	/*
+	 * The trampoline code has a size limit.
+	 * Make sure we fail to compile if the trampoline code grows
+	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
+	 */
+	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
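One subtlety in the moved trampoline is easy to miss: only the low 32 bits of the general-purpose registers are guaranteed to survive the switch to compatibility mode, so the code parks the top half of RSP in %rbx (whose low half, %ebx, the 32-bit code leaves alone) and reassembles the full pointer at .Lret. A self-contained C sketch of that split and rejoin, purely illustrative (the function name is hypothetical):

	#include <stdint.h>

	/*
	 * Mirrors the movq/shrq pair before the mode switch and the
	 * shlq/orq pair at .Lret in trampoline_32bit_src.
	 */
	uint64_t split_and_rejoin(uint64_t rsp)
	{
		uint32_t hi = (uint32_t)(rsp >> 32); /* kept live across the switch */
		uint32_t lo = (uint32_t)rsp;         /* RSP's surviving low half */

		/* ... 32-bit code runs here; only hi and lo are trustworthy ... */

		return ((uint64_t)hi << 32) | lo;    /* full pointer restored */
	}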
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 0d37420cad02..1cdcd4aaf395 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -235,7 +235,7 @@ static void handle_relocations(void *output, unsigned long output_len,
 
 	/*
 	 * Process relocations: 32 bit relocations first then 64 bit after.
-	 * Three sets of binary relocations are added to the end of the kernel
+	 * Two sets of binary relocations are added to the end of the kernel
 	 * before compression. Each relocation table entry is the kernel
 	 * address of the location which needs to be updated stored as a
 	 * 32-bit value which is sign extended to 64 bits.
@@ -245,8 +245,6 @@ static void handle_relocations(void *output, unsigned long output_len,
 	 * kernel bits...
 	 * 0 - zero terminator for 64 bit relocations
 	 * 64 bit relocation repeated
-	 * 0 - zero terminator for inverse 32 bit relocations
-	 * 32 bit inverse relocation repeated
 	 * 0 - zero terminator for 32 bit relocations
 	 * 32 bit relocation repeated
 	 *
@@ -263,16 +261,6 @@ static void handle_relocations(void *output, unsigned long output_len,
 		*(uint32_t *)ptr += delta;
 	}
 #ifdef CONFIG_X86_64
-	while (*--reloc) {
-		long extended = *reloc;
-		extended += map;
-
-		ptr = (unsigned long)extended;
-		if (ptr < min_addr || ptr > max_addr)
-			error("inverse 32-bit relocation outside of kernel!\n");
-
-		*(int32_t *)ptr -= delta;
-	}
 	for (reloc--; *reloc; reloc--) {
 		long extended = *reloc;
 		extended += map;
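With the inverse 32-bit set gone, the comment now describes two relocation tables, each ending at a zero terminator and walked backwards from the end of the decompressed image. A hedged, self-contained C sketch of that walk (the function and parameter names are illustrative, and the bounds checks of the real handle_relocations() are omitted):

	#include <stdint.h>

	/*
	 * 'reloc' starts at the last 32-bit entry; 'map' biases the stored
	 * kernel addresses into the output buffer; 'delta' is the offset
	 * between the link-time and load-time kernel addresses.
	 */
	static void walk_relocs(uint32_t *reloc, uint64_t map, int64_t delta)
	{
		/* 32-bit relocations: add the load delta at each address. */
		for (; *reloc; reloc--) {
			uint64_t ptr = (uint64_t)(int64_t)(int32_t)*reloc + map;
			*(uint32_t *)(uintptr_t)ptr += (uint32_t)delta;
		}
	#ifdef CONFIG_X86_64
		/* 64-bit relocations sit behind their own zero terminator. */
		for (reloc--; *reloc; reloc--) {
			uint64_t ptr = (uint64_t)(int64_t)(int32_t)*reloc + map;
			*(uint64_t *)(uintptr_t)ptr += delta;
		}
	#endif
	}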
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 083ec6d7722a..3b2bc61c9408 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -48,7 +48,7 @@ SECTIONS
 		*(.data)
 		*(.data.*)
 
-		/* Add 4 bytes of extra space for a CRC-32 checksum */
+		/* Add 4 bytes of extra space for the obsolete CRC-32 checksum */
 		. = ALIGN(. + 4, 0x200);
 		_edata = . ;
 	}
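As a worked example of why the expression above always reserves room for that checksum word: ". = ALIGN(. + 4, 0x200)" first advances the location counter by 4 and then rounds up to a 0x200-byte boundary, so at least 4 bytes of padding always separate the end of .data from _edata. If "." is at 0x5FC, then ". + 4" is 0x600, which is already aligned, leaving exactly the 4 bytes the (now unused) CRC-32 word occupied; if "." is at 0x5FD, the sum rounds up to 0x800 and leaves more. The padding can therefore never shrink below 4 bytes.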