author | Andy Lutomirski <luto@amacapital.net> | 2014-10-24 15:58:08 -0700 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-02-04 12:10:42 +0100 |
commit | 1e02ce4cccdcb9688386e5b8d2c9fa4660b45389 (patch) | |
tree | 7d514286844acea505228590119ac1a886cf6995 /arch/x86/kvm/vmx.c | |
parent | 375074cc736ab1d89a708c0a8d7baa4a70d5d476 (diff) | |
download | lwn-1e02ce4cccdcb9688386e5b8d2c9fa4660b45389.tar.gz lwn-1e02ce4cccdcb9688386e5b8d2c9fa4660b45389.zip | |
x86: Store a per-cpu shadow copy of CR4
Context switches and TLB flushes can change individual bits of CR4.
CR4 reads take several cycles, so store a shadow copy of CR4 in a
per-cpu variable.
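In code terms, this means routing CR4 accesses through small helpers that keep the per-cpu copy coherent with the hardware register. A simplified sketch of those helpers, modelled on what this series adds to arch/x86/include/asm/tlbflush.h (the boot-CR4 variant and locking commentary are elided, so treat the details as illustrative rather than the literal patch):

```c
#include <linux/percpu.h>	/* this_cpu_read()/this_cpu_write() */
#include <asm/special_insns.h>	/* __read_cr4()/__write_cr4() */

/* Initialize the shadow from hardware, once per CPU during bringup. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Read the cached value instead of the multi-cycle hardware register. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Set bits in CR4, keeping the shadow coherent and skipping the
 * expensive hardware write when nothing would change.
 */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
```

Callers such as the KVM code in the diff below then use cr4_read_shadow() wherever they previously executed a raw read_cr4().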
To avoid wasting a cache line, I added the CR4 shadow to
cpu_tlbstate, which is already touched in switch_mm. The heaviest
users of the CR4 shadow will be switch_mm and __switch_to_xtra, and
__switch_to_xtra is called shortly after switch_mm during context
switch, so the cacheline is likely to be hot.
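Concretely, the shadow is one more word in that per-cpu structure; a trimmed-down sketch of the layout (field names follow the kernel's struct tlb_state, comments paraphrased):

```c
struct tlb_state {
	struct mm_struct *active_mm;	/* already written by switch_mm() */
	int state;

	/*
	 * Shadow of the hardware CR4 value; the two are updated together,
	 * with interrupts disabled, so they never diverge.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
```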
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/3a54dd3353fffbf84804398e00dfdc5b7c1afd7d.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index db77537013d1..8dca6ccbb9ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2785,7 +2785,7 @@ static int hardware_enable(void)
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 	u64 old, test_bits;
 
-	if (read_cr4() & X86_CR4_VMXE)
+	if (cr4_read_shadow() & X86_CR4_VMXE)
 		return -EBUSY;
 
 	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
@@ -4255,7 +4255,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 	vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
 	/* Save the most likely value for this task's CR4 in the VMCS. */
-	cr4 = read_cr4();
+	cr4 = cr4_read_shadow();
 	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
 	vmx->host_state.vmcs_host_cr4 = cr4;
 
@@ -7784,7 +7784,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 
-	cr4 = read_cr4();
+	cr4 = cr4_read_shadow();
 	if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
 		vmcs_writel(HOST_CR4, cr4);
 		vmx->host_state.vmcs_host_cr4 = cr4;