author     Brijesh Singh <brijesh.singh@amd.com>    2022-02-09 12:10:13 -0600
committer  Borislav Petkov <bp@suse.de>             2022-04-06 13:23:00 +0200
commit     efac0eedfab515e523cde5cb7a62289eb2ee58f8 (patch)
tree       2ce7766a772a1edd3c5fe1a2dd7ff0155b5c997b /arch/x86/kernel/head64.c
parent     5e5ccff60a2977142d39b987a8b90e422d9fc634 (diff)
x86/kernel: Mark the .bss..decrypted section as shared in the RMP table
The encryption attribute for the .bss..decrypted section is cleared in the initial page table build, because the section contains data that needs to be shared between the guest and the hypervisor.

When SEV-SNP is active, just clearing the encryption attribute in the page table is not enough: the page state also needs to be updated in the RMP table.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220307213356.2797205-20-brijesh.singh@amd.com
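For clarity, here is a minimal sketch of the per-PMD transition the patch performs; the helper name is hypothetical, but the two steps mirror the hunk below: first mark the pages shared in the RMP table via early_snp_set_memory_shared(), then drop the encryption mask (C-bit) from the page-table entry so both views stay consistent.

	#include <asm/pgtable.h>	/* pmd_index(), PMD_SIZE, PTRS_PER_PMD */
	#include <asm/mem_encrypt.h>	/* sme_get_me_mask() */
	#include <asm/sev.h>		/* early_snp_set_memory_shared() */

	/*
	 * Hypothetical helper sketching what sme_postprocess_startup() does
	 * for one PMD-sized chunk of .bss..decrypted under SEV-SNP.
	 */
	static void __head snp_share_bss_decrypted_pmd(pmdval_t *pmd, unsigned long vaddr)
	{
		/*
		 * The kernel still runs on the identity mapping at this point,
		 * so __pa(vaddr) doubles as a currently valid virtual address
		 * for PVALIDATE.
		 */
		early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);

		/* Now clear the C-bit so the page table matches the RMP state. */
		pmd[pmd_index(vaddr)] -= sme_get_me_mask();
	}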
Diffstat (limited to 'arch/x86/kernel/head64.c')
-rw-r--r--  arch/x86/kernel/head64.c  13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 83514b9827e6..656d2f3e2cf0 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -143,7 +143,20 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv
if (sme_get_me_mask()) {
vaddr = (unsigned long)__start_bss_decrypted;
vaddr_end = (unsigned long)__end_bss_decrypted;
+
for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+ /*
+ * On SNP, transition the page to shared in the RMP table so that
+ * it is consistent with the page table attribute change.
+ *
+ * __start_bss_decrypted has a virtual address in the high range
+ * mapping (kernel .text). PVALIDATE, by way of
+ * early_snp_set_memory_shared(), requires a valid virtual
+ * address but the kernel is currently running off of the identity
+ * mapping so use __pa() to get a *currently* valid virtual address.
+ */
+ early_snp_set_memory_shared(__pa(vaddr), __pa(vaddr), PTRS_PER_PMD);
+
i = pmd_index(vaddr);
pmd[i] -= sme_get_me_mask();
}
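For context, data ends up in .bss..decrypted through the __bss_decrypted section attribute; the kvmclock boot pages are one existing user. The excerpt below is illustrative of such a consumer and is not part of this patch.

	#include <asm/mem_encrypt.h>	/* __bss_decrypted */

	/*
	 * Data shared with the hypervisor, placed in .bss..decrypted so it is
	 * mapped unencrypted (and, with SNP, marked shared in the RMP table).
	 */
	static struct pvclock_vsyscall_time_info
			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);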