| author | Borislav Petkov <bp@suse.de> | 2021-11-10 16:06:52 -0600 |
|---|---|---|
| committer | Borislav Petkov <bp@suse.de> | 2021-11-15 21:05:14 +0100 |
| commit | 5ed0a99b12aa2dd09afe7ba485145529b89f26e6 (patch) | |
| tree | ac22731b989ce3457c69ce7aef97c320c3417cdf /arch/x86/kernel/head64.c | |
| parent | dbc4c70e3cdfe204a67dd66bed78709ee3000ec0 (diff) | |
| download | lwn-5ed0a99b12aa2dd09afe7ba485145529b89f26e6.tar.gz lwn-5ed0a99b12aa2dd09afe7ba485145529b89f26e6.zip | |
x86/head64: Carve out the guest encryption postprocessing into a helper
Carve it out so that it is abstracted out of the main boot path. All
other encrypted guest-relevant processing should be placed in there.
No functional changes.
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20211110220731.2396491-7-brijesh.singh@amd.com
Diffstat (limited to 'arch/x86/kernel/head64.c')
-rw-r--r-- | arch/x86/kernel/head64.c | 60
1 file changed, 31 insertions, 29 deletions
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index fc5371a7e9d1..3be9dd213dad 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -126,6 +126,36 @@ static bool __head check_la57_support(unsigned long physaddr)
 }
 #endif
 
+static unsigned long sme_postprocess_startup(struct boot_params *bp, pmdval_t *pmd)
+{
+	unsigned long vaddr, vaddr_end;
+	int i;
+
+	/* Encrypt the kernel and related (if SME is active) */
+	sme_encrypt_kernel(bp);
+
+	/*
+	 * Clear the memory encryption mask from the .bss..decrypted section.
+	 * The bss section will be memset to zero later in the initialization so
+	 * there is no need to zero it after changing the memory encryption
+	 * attribute.
+	 */
+	if (sme_get_me_mask()) {
+		vaddr = (unsigned long)__start_bss_decrypted;
+		vaddr_end = (unsigned long)__end_bss_decrypted;
+		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+			i = pmd_index(vaddr);
+			pmd[i] -= sme_get_me_mask();
+		}
+	}
+
+	/*
+	 * Return the SME encryption mask (if SME is active) to be used as a
+	 * modifier for the initial pgdir entry programmed into CR3.
+	 */
+	return sme_get_me_mask();
+}
+
 /* Code in __startup_64() can be relocated during execution, but the compiler
  * doesn't have to generate PC-relative relocations when accessing globals from
  * that function. Clang actually does not generate them, which leads to
@@ -135,7 +165,6 @@ static bool __head check_la57_support(unsigned long physaddr)
 unsigned long __head __startup_64(unsigned long physaddr,
 				  struct boot_params *bp)
 {
-	unsigned long vaddr, vaddr_end;
 	unsigned long load_delta, *p;
 	unsigned long pgtable_flags;
 	pgdval_t *pgd;
@@ -276,34 +305,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	 */
 	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();
 
-	/* Encrypt the kernel and related (if SME is active) */
-	sme_encrypt_kernel(bp);
-
-	/*
-	 * Clear the memory encryption mask from the .bss..decrypted section.
-	 * The bss section will be memset to zero later in the initialization so
-	 * there is no need to zero it after changing the memory encryption
-	 * attribute.
-	 *
-	 * This is early code, use an open coded check for SME instead of
-	 * using cc_platform_has(). This eliminates worries about removing
-	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
-	 * function.
-	 */
-	if (sme_get_me_mask()) {
-		vaddr = (unsigned long)__start_bss_decrypted;
-		vaddr_end = (unsigned long)__end_bss_decrypted;
-		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
-			i = pmd_index(vaddr);
-			pmd[i] -= sme_get_me_mask();
-		}
-	}
-
-	/*
-	 * Return the SME encryption mask (if SME is active) to be used as a
-	 * modifier for the initial pgdir entry programmed into CR3.
-	 */
-	return sme_get_me_mask();
+	return sme_postprocess_startup(bp, pmd);
 }
 
 unsigned long __startup_secondary_64(void)
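The `pmd[i] -= sme_get_me_mask()` arithmetic works because the early page tables are built with the SME encryption mask folded into every entry, so for the `.bss..decrypted` range the encryption bit is known to be set and subtracting the mask simply clears it. Below is a minimal user-space sketch of that idea, not kernel code: the bit position, flag bits, and physical addresses are made up for illustration (on real hardware the kernel derives `sme_me_mask` from CPUID at boot).

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration only: pretend the SME encryption mask is PTE bit 51. */
#define FAKE_ME_MASK   (1ULL << 51)
#define PMD_FLAGS      0x1e3ULL   /* made-up present/large-page flag bits */

int main(void)
{
	/* Three fake PMD entries that map memory as encrypted. */
	uint64_t pmd[3] = {
		(0x100000000ULL | PMD_FLAGS) + FAKE_ME_MASK,
		(0x100200000ULL | PMD_FLAGS) + FAKE_ME_MASK,
		(0x100400000ULL | PMD_FLAGS) + FAKE_ME_MASK,
	};

	/*
	 * Clear the encryption mask the same way the helper does: the bit
	 * is known to be set, so subtracting the mask just clears it.
	 */
	for (int i = 0; i < 3; i++)
		pmd[i] -= FAKE_ME_MASK;

	for (int i = 0; i < 3; i++)
		printf("pmd[%d] = 0x%016llx\n", i, (unsigned long long)pmd[i]);

	return 0;
}
```

Note that, as the removed comment in `__startup_64()` explains, this path runs too early to use `cc_platform_has()`, which is why the open-coded `sme_get_me_mask()` check is kept in the new helper.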