| author | Lai Jiangshan <jiangshan.ljs@antgroup.com> | 2023-02-16 23:41:08 +0800 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2023-03-16 12:42:15 -0700 |
| commit | 90e444702a7c2e5d5806260735476d9bba0b598d | |
| tree | c0dbe61677c1495ee563b5a40f2195bf57af8a80 /arch/x86/kvm/mmu/mmu.c | |
| parent | 753b43c9d1b73337610861a6e644f8df3635d656 | |
KVM: x86/mmu: Move the check in FNAME(sync_page) as kvm_sync_page_check()
Move the check currently done in FNAME(sync_page) into a new helper, kvm_sync_page_check(), to prepare for checking the mmu->sync_page pointer before calling it.
Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com>
Link: https://lore.kernel.org/r/20230216154115.710033-3-jiangshanlai@gmail.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
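The heart of the new helper is a masking idiom: union kvm_mmu_page_role overlays its bitfields with a single integer `word`, so a role built with all-ones values in the fields to ignore doubles as a mask, and two roles are compatible when their words XOR to zero outside that mask. A minimal, self-contained sketch of the idiom follows — `page_role` and `roles_compatible` are hypothetical stand-ins, not KVM code:

```c
/*
 * Sketch of the masking idiom used by kvm_sync_page_check(): a union
 * overlays bitfields with one integer "word", so selected fields can be
 * ignored in a comparison by XOR-ing the words and masking with a role
 * whose ignored fields are set to all-ones.
 */
#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for union kvm_mmu_page_role. */
union page_role {
	uint32_t word;
	struct {
		uint32_t level:4;
		uint32_t access:3;
		uint32_t quadrant:2;
		uint32_t direct:1;
		uint32_t passthrough:1;
	};
};

/* Compare roles, ignoring level, access, quadrant and passthrough. */
static int roles_compatible(union page_role a, union page_role b)
{
	/* Designated initializers zero every field not named here. */
	const union page_role ign = {
		.level = 0xf,
		.access = 0x7,
		.quadrant = 0x3,
		.passthrough = 0x1,
	};

	return !((a.word ^ b.word) & ~ign.word);
}

int main(void)
{
	union page_role root = { .level = 4, .access = 7, .direct = 0 };
	union page_role sp   = { .level = 2, .access = 5, .direct = 0 };

	/* level/access differ, but both are masked: prints 1. */
	printf("compatible: %d\n", roles_compatible(root, sp));

	sp.direct = 1;	/* direct is NOT masked, so now they differ. */
	printf("compatible: %d\n", roles_compatible(sp, root));	/* 0 */
	return 0;
}
```

Building the mask with designated initializers rather than hand-computed hex keeps it readable and tracks the bitfield layout automatically, which is presumably why the patch writes `sync_role_ign` that way.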
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu/mmu.c | 43 |
1 file changed, 42 insertions(+), 1 deletion(-)
```diff
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index edad1a4828dc..6749fa4794a4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1914,10 +1914,51 @@ static bool sp_has_gptes(struct kvm_mmu_page *sp)
 		&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
 		if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
 
+static bool kvm_sync_page_check(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+	union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
+
+	/*
+	 * Ignore various flags when verifying that it's safe to sync a shadow
+	 * page using the current MMU context.
+	 *
+	 *  - level: not part of the overall MMU role and will never match as
+	 *           the MMU's level tracks the root level
+	 *  - access: updated based on the new guest PTE
+	 *  - quadrant: not part of the overall MMU role (similar to level)
+	 */
+	const union kvm_mmu_page_role sync_role_ign = {
+		.level = 0xf,
+		.access = 0x7,
+		.quadrant = 0x3,
+		.passthrough = 0x1,
+	};
+
+	/*
+	 * Direct pages can never be unsync, and KVM should never attempt to
+	 * sync a shadow page for a different MMU context, e.g. if the role
+	 * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+	 * reserved bits checks will be wrong, etc...
+	 */
+	if (WARN_ON_ONCE(sp->role.direct ||
+			 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
+		return false;
+
+	return true;
+}
+
+static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+	if (!kvm_sync_page_check(vcpu, sp))
+		return -1;
+
+	return vcpu->arch.mmu->sync_page(vcpu, sp);
+}
+
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			 struct list_head *invalid_list)
 {
-	int ret = vcpu->arch.mmu->sync_page(vcpu, sp);
+	int ret = __kvm_sync_page(vcpu, sp);
 
 	if (ret < 0)
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
```
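Per the changelog, the point of routing kvm_sync_page() through the new __kvm_sync_page() is to make room for a later check of the mmu->sync_page pointer before the call. As a rough illustration of that guard pattern only — `mmu_ctx`, `do_sync` and `fake_sync` are hypothetical names, and this is not code from this series:

```c
/*
 * Sketch of the guard the changelog prepares for: test a sync callback
 * pointer before invoking it, returning an error when none is installed.
 */
#include <stdio.h>

struct mmu_ctx {
	/* May be NULL, e.g. for modes that never have unsync pages. */
	int (*sync_page)(struct mmu_ctx *ctx);
};

static int do_sync(struct mmu_ctx *ctx)
{
	if (!ctx->sync_page)	/* the check this patch prepares for */
		return -1;
	return ctx->sync_page(ctx);
}

static int fake_sync(struct mmu_ctx *ctx)
{
	(void)ctx;
	return 0;	/* pretend the page synced cleanly */
}

int main(void)
{
	struct mmu_ctx with = { .sync_page = fake_sync };
	struct mmu_ctx without = { .sync_page = NULL };

	printf("with callback: %d\n", do_sync(&with));		/* 0 */
	printf("without callback: %d\n", do_sync(&without));	/* -1 */
	return 0;
}
```

Hoisting the role check into __kvm_sync_page() means such a pointer test can sit next to it in one place, rather than being duplicated at every caller of mmu->sync_page().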