author		Yang, Wei Y <wei.y.yang@intel.com>	2011-08-09 18:14:01 +0800
committer	Avi Kivity <avi@redhat.com>	2011-09-25 19:18:02 +0300
commit		cd46868c7f088baaaba8f81251c5929f6002d050 (patch)
tree		03cfc435d3d88e0546709bc69083f42cba7cd677 /arch/x86/kvm/paging_tmpl.h
parent		e4e517b4be019787ada4cbbce2f04570c21b0cbd (diff)
KVM: MMU: Fix SMEP failure during fetch
This patch fixes kvm-unit-tests hanging and an incorrect PT_ACCESSED_MASK bit being set in the case of an SMEP fault: the code updated 'eperm' only after the variable had already been checked.

Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
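The ordering problem can be illustrated outside the kernel. The following is a minimal, self-contained C sketch, not the walker code itself; the helper smep_fetch_violation() and its boolean inputs are invented for illustration. It shows why testing 'eperm' before the SMEP check that may set it lets the accessed bit be updated on a walk that should fault, which is exactly what the patch below reorders.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical predicate: kernel instruction fetch from a user-accessible
 * page while CR4.SMEP is set. */
static bool smep_fetch_violation(bool pte_user, bool cr4_smep,
                                 bool fetch_fault, bool user_fault)
{
        return pte_user && cr4_smep && fetch_fault && !user_fault;
}

int main(void)
{
        bool eperm, accessed;

        /* Old order: 'eperm' is consulted before the SMEP check can set it. */
        eperm = false;
        accessed = false;
        if (!eperm)
                accessed = true;        /* accessed bit wrongly set */
        if (smep_fetch_violation(true, true, true, false))
                eperm = true;           /* set too late to matter */
        printf("old order: eperm=%d accessed=%d\n", eperm, accessed);

        /* New order (what the patch does): do the SMEP check first. */
        eperm = false;
        accessed = false;
        if (smep_fetch_violation(true, true, true, false))
                eperm = true;
        if (!eperm)
                accessed = true;        /* correctly skipped */
        printf("new order: eperm=%d accessed=%d\n", eperm, accessed);

        return 0;
}

With these assumed inputs the first pass prints "eperm=1 accessed=1" while the second prints "eperm=1 accessed=0", mirroring the spurious PT_ACCESSED_MASK update before the patch and the behaviour after it.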
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index f6dd9feb201b..92994100638b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -147,7 +147,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
-	bool eperm;
+	bool eperm, last_gpte;
 	int offset;
 	const int write_fault = access & PFERR_WRITE_MASK;
 	const int user_fault = access & PFERR_USER_MASK;
@@ -221,6 +221,17 @@ retry_walk:
 		eperm = true;
 #endif
 
+		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
+		if (last_gpte) {
+			pte_access = pt_access &
+				     FNAME(gpte_access)(vcpu, pte, true);
+			/* check if the kernel is fetching from user page */
+			if (unlikely(pte_access & PT_USER_MASK) &&
+			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+				if (fetch_fault && !user_fault)
+					eperm = true;
+		}
+
 		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
@@ -238,18 +249,12 @@ retry_walk:
 
 		walker->ptes[walker->level - 1] = pte;
 
-		if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
+		if (last_gpte) {
 			int lvl = walker->level;
 			gpa_t real_gpa;
 			gfn_t gfn;
 			u32 ac;
 
-			/* check if the kernel is fetching from user page */
-			if (unlikely(pte_access & PT_USER_MASK) &&
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-				if (fetch_fault && !user_fault)
-					eperm = true;
-
 			gfn = gpte_to_gfn_lvl(pte, lvl);
 			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
 
@@ -295,7 +300,6 @@ retry_walk:
 		walker->ptes[walker->level - 1] = pte;
 	}
 
-	pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
 	walker->pt_access = pt_access;
 	walker->pte_access = pte_access;
 	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",