Diffstat (limited to 'arch')
 arch/arm64/include/asm/esr.h         | 118
 arch/arm64/include/asm/kvm_arm.h     |  75
 arch/arm64/include/asm/kvm_emulate.h |  30
 arch/arm64/kernel/entry.S            |  64
 arch/arm64/kernel/signal32.c         |   2
 arch/arm64/kernel/traps.c            |  50
 arch/arm64/kvm/emulate.c             |   5
 arch/arm64/kvm/handle_exit.c         |  39
 arch/arm64/kvm/hyp.S                 |  17
 arch/arm64/kvm/inject_fault.c        |  14
 arch/arm64/kvm/sys_regs.c            |  23
 arch/arm64/mm/fault.c                |   2
 arch/x86/include/asm/kvm_host.h      |  16
 arch/x86/kvm/emulate.c               |   6
 arch/x86/kvm/mmu.c                   |  20
 arch/x86/kvm/mmu.h                   |  12
 arch/x86/kvm/vmx.c                   |  10
 arch/x86/kvm/x86.c                   |  27
 18 files changed, 296 insertions(+), 234 deletions(-)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 72674f4c3871..92bbae381598 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,40 +18,90 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
-#define ESR_EL1_WRITE (1 << 6)
-#define ESR_EL1_CM (1 << 8)
-#define ESR_EL1_IL (1 << 25)
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12)
+#define ESR_ELx_EC_SMC32 (0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16)
+#define ESR_ELx_EC_SMC64 (0x17)
+#define ESR_ELx_EC_SYS64 (0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f)
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A)
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
-#define ESR_EL1_EC_SHIFT (26)
-#define ESR_EL1_EC_UNKNOWN (0x00)
-#define ESR_EL1_EC_WFI (0x01)
-#define ESR_EL1_EC_CP15_32 (0x03)
-#define ESR_EL1_EC_CP15_64 (0x04)
-#define ESR_EL1_EC_CP14_MR (0x05)
-#define ESR_EL1_EC_CP14_LS (0x06)
-#define ESR_EL1_EC_FP_ASIMD (0x07)
-#define ESR_EL1_EC_CP10_ID (0x08)
-#define ESR_EL1_EC_CP14_64 (0x0C)
-#define ESR_EL1_EC_ILL_ISS (0x0E)
-#define ESR_EL1_EC_SVC32 (0x11)
-#define ESR_EL1_EC_SVC64 (0x15)
-#define ESR_EL1_EC_SYS64 (0x18)
-#define ESR_EL1_EC_IABT_EL0 (0x20)
-#define ESR_EL1_EC_IABT_EL1 (0x21)
-#define ESR_EL1_EC_PC_ALIGN (0x22)
-#define ESR_EL1_EC_DABT_EL0 (0x24)
-#define ESR_EL1_EC_DABT_EL1 (0x25)
-#define ESR_EL1_EC_SP_ALIGN (0x26)
-#define ESR_EL1_EC_FP_EXC32 (0x28)
-#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERROR (0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
-#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1 (0x33)
-#define ESR_EL1_EC_WATCHPT_EL0 (0x34)
-#define ESR_EL1_EC_WATCHPT_EL1 (0x35)
-#define ESR_EL1_EC_BKPT32 (0x38)
-#define ESR_EL1_EC_BRK64 (0x3C)
+#define ESR_ELx_EC_SHIFT (26)
+#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL (UL(1) << 25)
+#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISV (UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT (22)
+#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE (UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT (16)
+#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF (UL(1) << 15)
+#define ESR_ELx_AR (UL(1) << 14)
+#define ESR_ELx_EA (UL(1) << 9)
+#define ESR_ELx_CM (UL(1) << 8)
+#define ESR_ELx_S1PTW (UL(1) << 7)
+#define ESR_ELx_WNR (UL(1) << 6)
+#define ESR_ELx_FSC (0x3F)
+#define ESR_ELx_FSC_TYPE (0x3C)
+#define ESR_ELx_FSC_EXTABT (0x10)
+#define ESR_ELx_FSC_FAULT (0x04)
+#define ESR_ELx_FSC_PERM (0x0C)
+#define ESR_ELx_CV (UL(1) << 24)
+#define ESR_ELx_COND_SHIFT (20)
+#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
+#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY__ */
#endif /* __ASM_ESR_H */
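
As a quick orientation aid (not part of the patch), here is a minimal standalone sketch of how the unified ESR_ELx_* field layout above decomposes a syndrome value; the esr_decode() helper and the sample value are hypothetical, with the masks copied from the new esr.h:

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the esr.h hunk above. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL		(1UL << 25)
#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)

/* Hypothetical helper: split an ESR value into its EC, IL and ISS fields. */
static void esr_decode(uint64_t esr)
{
	unsigned int ec  = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
	unsigned int il  = !!(esr & ESR_ELx_IL);
	uint64_t     iss = esr & ESR_ELx_ISS_MASK;

	printf("EC=0x%02x IL=%u ISS=0x%llx\n", ec, il,
	       (unsigned long long)iss);
}

int main(void)
{
	esr_decode(0x92000045);	/* sample: EC 0x24 (DABT, lower EL), IL set */
	return 0;
}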
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3da2d3acec0b..94674eb7e7bb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,6 +18,7 @@
#ifndef __ARM64_KVM_ARM_H__
#define __ARM64_KVM_ARM_H__
+#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/types.h>
@@ -184,79 +185,11 @@
#define MDCR_EL2_TPMCR (1 << 5)
#define MDCR_EL2_HPMN_MASK (0x1F)
-/* Exception Syndrome Register (ESR) bits */
-#define ESR_EL2_EC_SHIFT (26)
-#define ESR_EL2_EC (UL(0x3f) << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL (UL(1) << 25)
-#define ESR_EL2_ISS (ESR_EL2_IL - 1)
-#define ESR_EL2_ISV_SHIFT (24)
-#define ESR_EL2_ISV (UL(1) << ESR_EL2_ISV_SHIFT)
-#define ESR_EL2_SAS_SHIFT (22)
-#define ESR_EL2_SAS (UL(3) << ESR_EL2_SAS_SHIFT)
-#define ESR_EL2_SSE (1 << 21)
-#define ESR_EL2_SRT_SHIFT (16)
-#define ESR_EL2_SRT_MASK (0x1f << ESR_EL2_SRT_SHIFT)
-#define ESR_EL2_SF (1 << 15)
-#define ESR_EL2_AR (1 << 14)
-#define ESR_EL2_EA (1 << 9)
-#define ESR_EL2_CM (1 << 8)
-#define ESR_EL2_S1PTW (1 << 7)
-#define ESR_EL2_WNR (1 << 6)
-#define ESR_EL2_FSC (0x3f)
-#define ESR_EL2_FSC_TYPE (0x3c)
-
-#define ESR_EL2_CV_SHIFT (24)
-#define ESR_EL2_CV (UL(1) << ESR_EL2_CV_SHIFT)
-#define ESR_EL2_COND_SHIFT (20)
-#define ESR_EL2_COND (UL(0xf) << ESR_EL2_COND_SHIFT)
-
-
-#define FSC_FAULT (0x04)
-#define FSC_PERM (0x0c)
+/* For compatibility with fault code shared with 32-bit */
+#define FSC_FAULT ESR_ELx_FSC_FAULT
+#define FSC_PERM ESR_ELx_FSC_PERM
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
-#define ESR_EL2_EC_UNKNOWN (0x00)
-#define ESR_EL2_EC_WFI (0x01)
-#define ESR_EL2_EC_CP15_32 (0x03)
-#define ESR_EL2_EC_CP15_64 (0x04)
-#define ESR_EL2_EC_CP14_MR (0x05)
-#define ESR_EL2_EC_CP14_LS (0x06)
-#define ESR_EL2_EC_FP_ASIMD (0x07)
-#define ESR_EL2_EC_CP10_ID (0x08)
-#define ESR_EL2_EC_CP14_64 (0x0C)
-#define ESR_EL2_EC_ILL_ISS (0x0E)
-#define ESR_EL2_EC_SVC32 (0x11)
-#define ESR_EL2_EC_HVC32 (0x12)
-#define ESR_EL2_EC_SMC32 (0x13)
-#define ESR_EL2_EC_SVC64 (0x15)
-#define ESR_EL2_EC_HVC64 (0x16)
-#define ESR_EL2_EC_SMC64 (0x17)
-#define ESR_EL2_EC_SYS64 (0x18)
-#define ESR_EL2_EC_IABT (0x20)
-#define ESR_EL2_EC_IABT_HYP (0x21)
-#define ESR_EL2_EC_PC_ALIGN (0x22)
-#define ESR_EL2_EC_DABT (0x24)
-#define ESR_EL2_EC_DABT_HYP (0x25)
-#define ESR_EL2_EC_SP_ALIGN (0x26)
-#define ESR_EL2_EC_FP_EXC32 (0x28)
-#define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERROR (0x2F)
-#define ESR_EL2_EC_BREAKPT (0x30)
-#define ESR_EL2_EC_BREAKPT_HYP (0x31)
-#define ESR_EL2_EC_SOFTSTP (0x32)
-#define ESR_EL2_EC_SOFTSTP_HYP (0x33)
-#define ESR_EL2_EC_WATCHPT (0x34)
-#define ESR_EL2_EC_WATCHPT_HYP (0x35)
-#define ESR_EL2_EC_BKPT32 (0x38)
-#define ESR_EL2_EC_VECTOR32 (0x3A)
-#define ESR_EL2_EC_BRK64 (0x3C)
-
-#define ESR_EL2_EC_xABT_xFSR_EXTABT 0x10
-
-#define ESR_EL2_EC_WFI_ISS_WFE (1 << 0)
-
-#define ESR_EL2_HVC_IMM_MASK ((1UL << 16) - 1)
-
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index b3f1defcb081..c3baa971edab 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -23,8 +23,10 @@
#define __ARM64_KVM_EMULATE_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
@@ -129,68 +131,68 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_HVC_IMM_MASK;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}
static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
- return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+ return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
- return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+ return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+ return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+ return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+ return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
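
A hedged, standalone sketch of how the data-abort accessors above pull a syndrome apart; decode_dabt() and the sample syndrome are made up for illustration, with the field masks copied from the new esr.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from the new esr.h. */
#define ESR_ELx_ISV		(1UL << 24)
#define ESR_ELx_SAS_SHIFT	22
#define ESR_ELx_SAS		(3UL << ESR_ELx_SAS_SHIFT)
#define ESR_ELx_SSE		(1UL << 21)
#define ESR_ELx_SRT_SHIFT	16
#define ESR_ELx_SRT_MASK	(0x1FUL << ESR_ELx_SRT_SHIFT)
#define ESR_ELx_WNR		(1UL << 6)

/* Hypothetical mirror of the kvm_vcpu_dabt_*() accessors above,
 * operating on a raw syndrome instead of a vcpu. */
static void decode_dabt(uint32_t hsr)
{
	bool valid = hsr & ESR_ELx_ISV;
	int  size  = 1 << ((hsr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
	int  rt    = (hsr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
	bool write = hsr & ESR_ELx_WNR;
	bool sext  = hsr & ESR_ELx_SSE;

	printf("ISV=%d size=%d Rt=%d %s sign-extend=%d\n",
	       valid, size, rt, write ? "write" : "read", sext);
}

int main(void)
{
	decode_dabt(0x93830046);	/* made-up syndrome for illustration */
	return 0;
}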
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fd4fa374e5d2..02e6af117762 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid)
el1_sync:
kernel_entry 1
mrs x1, esr_el1 // read the syndrome register
- lsr x24, x1, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_DABT_EL1 // data abort in EL1
+ lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
b.eq el1_da
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el1_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL1
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL1 // debug exception in EL1
+ cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
b.ge el1_dbg
b el1_inv
el1_da:
@@ -318,7 +318,7 @@ el1_dbg:
/*
* Debug exception handling
*/
- cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
@@ -375,26 +375,26 @@ el1_preempt:
el0_sync:
kernel_entry 0
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
b.eq el0_svc
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC64 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_SYS64 // configurable trap
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_SP_ALIGN // stack alignment exception
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_PC_ALIGN // pc alignment exception
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
b.eq el0_sp_pc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
@@ -403,30 +403,30 @@ el0_sync:
el0_sync_compat:
kernel_entry 0, 32
mrs x25, esr_el1 // read the syndrome register
- lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class
- cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SVC32 // SVC in 32-bit state
b.eq el0_svc_compat
- cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0
+ cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
b.eq el0_da
- cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0
+ cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
b.eq el0_ia
- cmp x24, #ESR_EL1_EC_FP_ASIMD // FP/ASIMD access
+ cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
b.eq el0_fpsimd_acc
- cmp x24, #ESR_EL1_EC_FP_EXC32 // FP/ASIMD exception
+ cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
- cmp x24, #ESR_EL1_EC_UNKNOWN // unknown exception in EL0
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_32 // CP15 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP15_64 // CP15 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_MR // CP14 MRC/MCR trap
+ cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_LS // CP14 LDC/STC trap
+ cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_CP14_64 // CP14 MRRC/MCRR trap
+ cmp x24, #ESR_ELx_EC_CP14_64 // CP14 MRRC/MCRR trap
b.eq el0_undef
- cmp x24, #ESR_EL1_EC_BREAKPT_EL0 // debug exception in EL0
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
b.ge el0_dbg
b el0_inv
el0_svc_compat:
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 5a1ba6e80d4e..192d900c058f 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -501,7 +501,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
/* set the compat FSR WnR */
- __put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+ __put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0a801e3743d5..1ef2940df13c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -33,6 +33,7 @@
#include <asm/atomic.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
return sys_ni_syscall();
}
+static const char *esr_class_str[] = {
+ [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC",
+ [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized",
+ [ESR_ELx_EC_WFx] = "WFI/WFE",
+ [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC",
+ [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC",
+ [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC",
+ [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC",
+ [ESR_ELx_EC_FP_ASIMD] = "ASIMD",
+ [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS",
+ [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC",
+ [ESR_ELx_EC_ILL] = "PSTATE.IL",
+ [ESR_ELx_EC_SVC32] = "SVC (AArch32)",
+ [ESR_ELx_EC_HVC32] = "HVC (AArch32)",
+ [ESR_ELx_EC_SMC32] = "SMC (AArch32)",
+ [ESR_ELx_EC_SVC64] = "SVC (AArch64)",
+ [ESR_ELx_EC_HVC64] = "HVC (AArch64)",
+ [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
+ [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
+ [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
+ [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
+ [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
+ [ESR_ELx_EC_PC_ALIGN] = "PC Alignment",
+ [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)",
+ [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)",
+ [ESR_ELx_EC_SP_ALIGN] = "SP Alignment",
+ [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)",
+ [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)",
+ [ESR_ELx_EC_SERROR] = "SError",
+ [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)",
+ [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)",
+ [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)",
+ [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)",
+ [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)",
+ [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)",
+ [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)",
+ [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)",
+ [ESR_ELx_EC_BRK64] = "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+ return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+}
+
/*
* bad_mode handles the impossible case in the exception vector.
*/
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
- pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
- handler[reason], esr);
+ pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+ handler[reason], esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
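
A small standalone sketch (not kernel code) of the designated-initializer lookup used by esr_class_str[] and esr_get_class_string() above; only two classes are filled in here for brevity:

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MAX		0x3F
#define ESR_ELx_EC_SVC64	0x15
#define ESR_ELx_EC_BRK64	0x3C

/* Same designated-initializer trick as esr_class_str[]: every slot
 * defaults to the "unrecognized" string, and only the allocated
 * exception classes are overridden. */
static const char *const ec_str[] = {
	[0 ... ESR_ELx_EC_MAX]	= "UNRECOGNIZED EC",
	[ESR_ELx_EC_SVC64]	= "SVC (AArch64)",
	[ESR_ELx_EC_BRK64]	= "BRK (AArch64)",
};

static const char *class_string(uint32_t esr)
{
	return ec_str[esr >> ESR_ELx_EC_SHIFT];
}

int main(void)
{
	printf("%s\n", class_string(0x56000000));	/* EC 0x15: SVC */
	printf("%s\n", class_string(0x02000000));	/* EC 0x00: default */
	return 0;
}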
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
index 124418d17049..f87d8fbaa48d 100644
--- a/arch/arm64/kvm/emulate.c
+++ b/arch/arm64/kvm/emulate.c
@@ -22,6 +22,7 @@
*/
#include <linux/kvm_host.h>
+#include <asm/esr.h>
#include <asm/kvm_emulate.h>
/*
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
- if (esr & ESR_EL2_CV)
- return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+ if (esr & ESR_ELx_CV)
+ return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
return -1;
}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6a7eb3ce7027..524fa25671fc 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -21,8 +21,10 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
+
+#include <asm/esr.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>
@@ -67,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE) {
+ if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
kvm_vcpu_on_spin(vcpu);
} else {
@@ -81,29 +83,30 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static exit_handle_fn arm_exit_handlers[] = {
- [ESR_EL2_EC_WFI] = kvm_handle_wfx,
- [ESR_EL2_EC_CP15_32] = kvm_handle_cp15_32,
- [ESR_EL2_EC_CP15_64] = kvm_handle_cp15_64,
- [ESR_EL2_EC_CP14_MR] = kvm_handle_cp14_32,
- [ESR_EL2_EC_CP14_LS] = kvm_handle_cp14_load_store,
- [ESR_EL2_EC_CP14_64] = kvm_handle_cp14_64,
- [ESR_EL2_EC_HVC32] = handle_hvc,
- [ESR_EL2_EC_SMC32] = handle_smc,
- [ESR_EL2_EC_HVC64] = handle_hvc,
- [ESR_EL2_EC_SMC64] = handle_smc,
- [ESR_EL2_EC_SYS64] = kvm_handle_sys_reg,
- [ESR_EL2_EC_IABT] = kvm_handle_guest_abort,
- [ESR_EL2_EC_DABT] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_WFx] = kvm_handle_wfx,
+ [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
+ [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
+ [ESR_ELx_EC_CP14_MR] = kvm_handle_cp14_32,
+ [ESR_ELx_EC_CP14_LS] = kvm_handle_cp14_load_store,
+ [ESR_ELx_EC_CP14_64] = kvm_handle_cp14_64,
+ [ESR_ELx_EC_HVC32] = handle_hvc,
+ [ESR_ELx_EC_SMC32] = handle_smc,
+ [ESR_ELx_EC_HVC64] = handle_hvc,
+ [ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
- u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
!arm_exit_handlers[hsr_ec]) {
- kvm_err("Unknown exception class: hsr: %#08x\n",
- (unsigned int)kvm_vcpu_get_hsr(vcpu));
+ kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
+ hsr, esr_get_class_string(hsr));
BUG();
}
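
For illustration only, a standalone sketch of the dispatch pattern used by arm_exit_handlers[] above: a sparse table indexed by exception class, with a fallback for classes that have no handler registered. The handler names here are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_WFx		0x01
#define ESR_ELx_EC_HVC64	0x16

typedef int (*exit_handler_fn)(uint32_t esr);

static int handle_wfx(uint32_t esr) { (void)esr; puts("WFI/WFE trap"); return 1; }
static int handle_hvc(uint32_t esr) { (void)esr; puts("HVC call");     return 1; }

/* Sparse table indexed by exception class; NULL means "no handler". */
static const exit_handler_fn handlers[0x40] = {
	[ESR_ELx_EC_WFx]	= handle_wfx,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
};

static int dispatch(uint32_t esr)
{
	uint8_t ec = esr >> ESR_ELx_EC_SHIFT;

	if (ec >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[ec]) {
		fprintf(stderr, "Unknown exception class 0x%02x\n", ec);
		return -1;
	}
	return handlers[ec](esr);
}

int main(void)
{
	dispatch(ESR_ELx_EC_HVC64 << ESR_ELx_EC_SHIFT);
	return 0;
}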
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index d9c43447eb04..31b4911b8522 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,15 +17,16 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/memory.h>
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/memory.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1162,9 +1163,9 @@ el1_sync: // Guest trapped into EL2
push x2, x3
mrs x1, esr_el2
- lsr x2, x1, #ESR_EL2_EC_SHIFT
+ lsr x2, x1, #ESR_ELx_EC_SHIFT
- cmp x2, #ESR_EL2_EC_HVC64
+ cmp x2, #ESR_ELx_EC_HVC64
b.ne el1_trap
mrs x3, vttbr_el2 // If vttbr is valid, the 64bit guest
@@ -1199,13 +1200,13 @@ el1_trap:
* x1: ESR
* x2: ESR_EC
*/
- cmp x2, #ESR_EL2_EC_DABT
- mov x0, #ESR_EL2_EC_IABT
+ cmp x2, #ESR_ELx_EC_DABT_LOW
+ mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
b.ne 1f // Not an abort we care about
/* This is an abort. Check for permission fault */
- and x2, x1, #ESR_EL2_FSC_TYPE
+ and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
b.ne 1f // Not a permission fault
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 81a02a8762b0..f02530e726f6 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
* instruction set. Report an external synchronous abort.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
/*
* Here, the guest runs in AArch64 mode when in EL1. If we get
* an AArch32 fault, it means we managed to trap an EL0 fault.
*/
if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
- esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
else
- esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+ esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
if (!is_iabt)
- esr |= ESR_EL1_EC_DABT_EL0;
+ esr |= ESR_ELx_EC_DABT_LOW;
- vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+ vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
static void inject_undef64(struct kvm_vcpu *vcpu)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+ u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
*vcpu_spsr(vcpu) = cpsr;
*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
* set.
*/
if (kvm_vcpu_trap_il_is32bit(vcpu))
- esr |= ESR_EL1_IL;
+ esr |= ESR_ELx_IL;
vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}
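
A rough standalone sketch of the intent behind inject_abt64()'s ESR composition above; the in-kernel function additionally distinguishes current versus lower EL based on the mode the guest faulted from, and writes the result into the guest's ESR_EL1, both of which this sketch omits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Constants copied from the new esr.h. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_IABT_LOW	0x20
#define ESR_ELx_EC_DABT_LOW	0x24
#define ESR_ELx_IL		(1UL << 25)
#define ESR_ELx_FSC_EXTABT	0x10

/* Pick the abort class, keep the IL bit if the faulting instruction
 * was 32-bit, and report an external synchronous abort in the FSC. */
static uint32_t make_injected_esr(bool is_iabt, bool il_32bit)
{
	uint32_t esr = 0;

	if (il_32bit)
		esr |= ESR_ELx_IL;

	esr |= (uint32_t)(is_iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW)
			<< ESR_ELx_EC_SHIFT;

	return esr | ESR_ELx_FSC_EXTABT;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)make_injected_esr(false, true));
	return 0;
}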
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 8c30f265198e..7ad7af51856f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,17 +20,20 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/mm.h>
#include <linux/kvm_host.h>
+#include <linux/mm.h>
#include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
+
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
#include <trace/events/kvm.h>
#include "sys_regs.h"
@@ -851,12 +854,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
int cp;
switch(hsr_ec) {
- case ESR_EL2_EC_CP15_32:
- case ESR_EL2_EC_CP15_64:
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
cp = 15;
break;
- case ESR_EL2_EC_CP14_MR:
- case ESR_EL2_EC_CP14_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_64:
cp = 14;
break;
default:
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c11cd27ca8f5..96da13167d4a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (esr & ESR_LNX_EXEC) {
vm_flags = VM_EXEC;
- } else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
+ } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
}
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3ceddf41ca74..843bea0e70fd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -51,7 +51,7 @@
| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR3_L_MODE_RESERVED_BITS 0xFFFFFF0000000000ULL
-#define CR3_PCID_INVD (1UL << 63)
+#define CR3_PCID_INVD BIT_64(63)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
@@ -160,6 +160,18 @@ enum {
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
+
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
/*
@@ -615,6 +627,8 @@ struct kvm_arch {
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
#endif
+
+ bool boot_vcpu_runs_old_kvmclock;
};
struct kvm_vm_stat {
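
For reference, a hedged standalone sketch of how the relocated PFERR_* masks combine in an x86 page-fault error code; print_pferr() is a hypothetical pretty-printer, not part of KVM:

#include <stdio.h>

/* Copied from the kvm_host.h hunk above. */
#define PFERR_PRESENT_MASK	(1U << 0)
#define PFERR_WRITE_MASK	(1U << 1)
#define PFERR_USER_MASK		(1U << 2)
#define PFERR_RSVD_MASK		(1U << 3)
#define PFERR_FETCH_MASK	(1U << 4)

/* Hypothetical helper: describe what each bit of an error code means. */
static void print_pferr(unsigned int error_code)
{
	printf("%s %s fault in %s mode%s\n",
	       error_code & PFERR_PRESENT_MASK ? "protection" : "not-present",
	       error_code & PFERR_FETCH_MASK ? "fetch" :
	       error_code & PFERR_WRITE_MASK ? "write" : "read",
	       error_code & PFERR_USER_MASK ? "user" : "supervisor",
	       error_code & PFERR_RSVD_MASK ? ", reserved bit set" : "");
}

int main(void)
{
	print_pferr(PFERR_PRESENT_MASK | PFERR_WRITE_MASK | PFERR_USER_MASK);
	return 0;
}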
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d949287ed010..ef23c1e5fa9f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4909,8 +4909,12 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
- if (rc != X86EMUL_CONTINUE)
+ if (rc != X86EMUL_CONTINUE) {
+ if (rc == X86EMUL_PROPAGATE_FAULT &&
+ ctxt->exception.vector == PF_VECTOR)
+ ctxt->exception.error_code |= PFERR_WRITE_MASK;
goto done;
+ }
}
ctxt->dst.orig_val = ctxt->dst.val;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3aa46aaa8cb3..0ed9f795e4f0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -532,6 +532,11 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
return (old_spte & bit_mask) && !(new_spte & bit_mask);
}
+static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+ return (old_spte & bit_mask) != (new_spte & bit_mask);
+}
+
/* Rules for using mmu_spte_set:
* Set the sptep from nonpresent to present.
* Note: the sptep being assigned *must* be either not present
@@ -582,6 +587,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
if (!shadow_accessed_mask)
return ret;
+ /*
+ * Flush TLB when accessed/dirty bits are changed in the page tables,
+ * to guarantee consistency between TLB and page tables.
+ */
+ if (spte_is_bit_changed(old_spte, new_spte,
+ shadow_accessed_mask | shadow_dirty_mask))
+ ret = true;
+
if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
@@ -4289,6 +4302,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
struct kvm_memory_slot *memslot;
gfn_t last_gfn;
int i;
+ bool flush = false;
memslot = id_to_memslot(kvm->memslots, slot);
last_gfn = memslot->base_gfn + memslot->npages - 1;
@@ -4305,7 +4319,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
for (index = 0; index <= last_index; ++index, ++rmapp) {
if (*rmapp)
- __rmap_write_protect(kvm, rmapp, false);
+ flush |= __rmap_write_protect(kvm, rmapp,
+ false);
if (need_resched() || spin_needbreak(&kvm->mmu_lock))
cond_resched_lock(&kvm->mmu_lock);
@@ -4332,7 +4347,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
* instead of PT_WRITABLE_MASK, that means it does not depend
* on PT_WRITABLE_MASK anymore.
*/
- kvm_flush_remote_tlbs(kvm);
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
}
#define BATCH_ZAP_PAGES 10
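
A standalone sketch of the spte_is_bit_changed() predicate added above and of why its caller requests a TLB flush; the SHADOW_*_MASK values are stand-ins for the kernel's shadow_accessed_mask/shadow_dirty_mask globals:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's shadow_*_mask globals. */
#define SHADOW_ACCESSED_MASK	(1ULL << 5)
#define SHADOW_DIRTY_MASK	(1ULL << 6)

/* Same predicate as spte_is_bit_changed(): true if any of the masked
 * bits differ between the old and new SPTE values. */
static bool spte_is_bit_changed(uint64_t old_spte, uint64_t new_spte,
				uint64_t bit_mask)
{
	return (old_spte & bit_mask) != (new_spte & bit_mask);
}

int main(void)
{
	uint64_t old_spte = SHADOW_ACCESSED_MASK;	/* accessed set */
	uint64_t new_spte = 0;				/* accessed cleared */

	/* A change in accessed/dirty means the TLB may still hold the old
	 * state, so the caller must ask for a remote TLB flush. */
	if (spte_is_bit_changed(old_spte, new_spte,
				SHADOW_ACCESSED_MASK | SHADOW_DIRTY_MASK))
		puts("flush needed");

	return 0;
}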
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index a7f9a121690d..c7d65637c851 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,18 +44,6 @@
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1
-#define PFERR_PRESENT_BIT 0
-#define PFERR_WRITE_BIT 1
-#define PFERR_USER_BIT 2
-#define PFERR_RSVD_BIT 3
-#define PFERR_FETCH_BIT 4
-
-#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
-#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
-#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
-#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
-#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
-
static inline u64 rsvd_bits(int s, int e)
{
return ((1ULL << (e - s + 1)) - 1) << s;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ce350718eb88..c987374d92c1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -964,16 +964,6 @@ static inline bool cpu_has_vmx_ept_execute_only(void)
return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}
-static inline bool cpu_has_vmx_eptp_uncacheable(void)
-{
- return vmx_capability.ept & VMX_EPTP_UC_BIT;
-}
-
-static inline bool cpu_has_vmx_eptp_writeback(void)
-{
- return vmx_capability.ept & VMX_EPTP_WB_BIT;
-}
-
static inline bool cpu_has_vmx_ept_2m_page(void)
{
return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 556dfb4efc43..d2bbb2d86610 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -496,7 +496,7 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
void *data, int offset, int len, u32 access)
{
return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
@@ -647,7 +647,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
}
}
-int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
u64 old_xcr0 = vcpu->arch.xcr0;
@@ -1193,7 +1193,7 @@ static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
-unsigned long max_tsc_khz;
+static unsigned long max_tsc_khz;
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
@@ -1247,7 +1247,7 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
-void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
+static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
@@ -1542,7 +1542,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
&ka->master_cycle_now);
ka->use_master_clock = host_tsc_clocksource && vcpus_matched
- && !backwards_tsc_observed;
+ && !backwards_tsc_observed
+ && !ka->boot_vcpu_runs_old_kvmclock;
if (ka->use_master_clock)
atomic_set(&kvm_guest_has_master_clock, 1);
@@ -2174,8 +2175,20 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
+ struct kvm_arch *ka = &vcpu->kvm->arch;
+
kvmclock_reset(vcpu);
+ if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
+ bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
+
+ if (ka->boot_vcpu_runs_old_kvmclock != tmp)
+ set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
+ &vcpu->requests);
+
+ ka->boot_vcpu_runs_old_kvmclock = tmp;
+ }
+
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
@@ -2752,6 +2765,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_READONLY_MEM:
case KVM_CAP_HYPERV_TIME:
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
+ case KVM_CAP_TSC_DEADLINE_TIMER:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
@@ -2790,9 +2804,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
- case KVM_CAP_TSC_DEADLINE_TIMER:
- r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
- break;
default:
r = 0;
break;
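
Finally, a simplified, hedged sketch of the kvmclock detection added to kvm_set_msr_common() above: a guest-initiated write of the legacy system-time MSR from vCPU 0 marks the VM as running an old kvmclock, and any change of that flag requests a masterclock update. The struct and MSR indices below are stand-ins, not the real KVM definitions:

#include <stdbool.h>
#include <stdio.h>

enum { MSR_KVM_SYSTEM_TIME, MSR_KVM_SYSTEM_TIME_NEW };	/* stand-in indices */

struct kvm_arch_sketch {
	bool boot_vcpu_runs_old_kvmclock;
	bool masterclock_update_requested;
};

/* Simplified mirror of the MSR_KVM_SYSTEM_TIME{,_NEW} case above. */
static void set_system_time_msr(struct kvm_arch_sketch *ka, int vcpu_id,
				unsigned int msr, bool host_initiated)
{
	if (vcpu_id == 0 && !host_initiated) {
		bool old = (msr == MSR_KVM_SYSTEM_TIME);

		if (ka->boot_vcpu_runs_old_kvmclock != old)
			ka->masterclock_update_requested = true;

		ka->boot_vcpu_runs_old_kvmclock = old;
	}
}

int main(void)
{
	struct kvm_arch_sketch ka = { 0 };

	set_system_time_msr(&ka, 0, MSR_KVM_SYSTEM_TIME, false);
	printf("old kvmclock: %d, update requested: %d\n",
	       ka.boot_vcpu_runs_old_kvmclock,
	       ka.masterclock_update_requested);
	return 0;
}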