author    Linus Torvalds <torvalds@linux-foundation.org>    2015-06-22 17:16:11 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-06-22 17:16:11 -0700
commit    e75c73ad64478c12b3a44b86a3e7f62a4f65b93e (patch)
tree      9dbb1a2a4e53b480df86c49d478751b203cdccd4 /drivers/lguest
parent    cfe3eceb7a2eb91284d5605c5315249bb165e9d3 (diff)
parent    a8424003679e90b9952e20adcd1ff1560d9dd3e9 (diff)
Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 FPU updates from Ingo Molnar:
 "This tree contains two main changes:

  - The big FPU code rewrite: wide reaching cleanups and reorganization
    that pulls all the FPU code together into a clean base in
    arch/x86/fpu/.

    The resulting code is leaner and faster, and much easier to
    understand.  This enables future work to further simplify the FPU
    code (such as removing lazy FPU restores).

    By its nature these changes have a substantial regression risk: FPU
    code related bugs are long lived, because races are often subtle and
    bugs mask as user-space failures that are difficult to track back to
    kernel side bugs.  I'm aware of no unfixed (or even suspected) FPU
    related regression so far.

  - MPX support rework/fixes.  As this is still not a released CPU
    feature, there were some buglets in the code - should be much more
    robust now (Dave Hansen)"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (250 commits)
  x86/fpu: Fix double-increment in setup_xstate_features()
  x86/mpx: Allow 32-bit binaries on 64-bit kernels again
  x86/mpx: Do not count MPX VMAs as neighbors when unmapping
  x86/mpx: Rewrite the unmap code
  x86/mpx: Support 32-bit binaries on 64-bit kernels
  x86/mpx: Use 32-bit-only cmpxchg() for 32-bit apps
  x86/mpx: Introduce new 'directory entry' to 'addr' helper function
  x86/mpx: Add temporary variable to reduce masking
  x86: Make is_64bit_mm() widely available
  x86/mpx: Trace allocation of new bounds tables
  x86/mpx: Trace the attempts to find bounds tables
  x86/mpx: Trace entry to bounds exception paths
  x86/mpx: Trace #BR exceptions
  x86/mpx: Introduce a boot-time disable flag
  x86/mpx: Restrict the mmap() size check to bounds tables
  x86/mpx: Remove redundant MPX_BNDCFG_ADDR_MASK
  x86/mpx: Clean up the code by not passing a task pointer around when unnecessary
  x86/mpx: Use the new get_xsave_field_ptr() API
  x86/fpu/xstate: Wrap get_xsave_addr() to make it safer
  x86/fpu/xstate: Fix up bad get_xsave_addr() assumptions
  ...
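The part of the rewrite that matters for the lguest diff below is a small set of renames: the <asm/i387.h> header became <asm/fpu/internal.h>, user_has_fpu() became fpregs_active(), and math_state_restore() became fpu__restore(), which now takes the task's struct fpu explicitly.  A minimal sketch of the new calling convention follows; the restore_task_fpu_if_needed() wrapper is hypothetical and only shows how the renamed calls fit together.

/*
 * Sketch only: the renamed v4.2-era FPU API that the patch below
 * switches lguest over to.  The wrapper itself is hypothetical.
 */
#include <linux/sched.h>          /* current, struct task_struct */
#include <asm/fpu/internal.h>     /* was <asm/i387.h> before the rewrite */

static inline void restore_task_fpu_if_needed(void)
{
        /*
         * fpregs_active() replaces user_has_fpu(): are the current
         * task's FPU registers live in hardware?
         */
        if (!fpregs_active())
                /*
                 * fpu__restore() replaces math_state_restore() and now
                 * takes the struct fpu to reload; it may sleep.
                 */
                fpu__restore(&current->thread.fpu);
}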
Diffstat (limited to 'drivers/lguest')
-rw-r--r--  drivers/lguest/x86/core.c   12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 30f2aef69d78..6a4cd771a2be 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -46,7 +46,7 @@
#include <asm/setup.h>
#include <asm/lguest.h>
#include <asm/uaccess.h>
-#include <asm/i387.h>
+#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include "../lg.h"
@@ -251,7 +251,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
* we set it now, so we can trap and pass that trap to the Guest if it
* uses the FPU.
*/
- if (cpu->ts && user_has_fpu())
+ if (cpu->ts && fpregs_active())
stts();
/*
@@ -283,7 +283,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
/* Clear the host TS bit if it was set above. */
- if (cpu->ts && user_has_fpu())
+ if (cpu->ts && fpregs_active())
clts();
/*
@@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
/*
* Similarly, if we took a trap because the Guest used the FPU,
* we have to restore the FPU it expects to see.
- * math_state_restore() may sleep and we may even move off to
+ * fpu__restore() may sleep and we may even move off to
* a different CPU. So all the critical stuff should be done
* before this.
*/
- else if (cpu->regs->trapnum == 7 && !user_has_fpu())
- math_state_restore();
+ else if (cpu->regs->trapnum == 7 && !fpregs_active())
+ fpu__restore(&current->thread.fpu);
}
/*H:130
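Taken together, the three changed call sites above implement one pattern: before switching to the Guest, set CR0.TS if the Guest has TS set and this task's FPU registers are live, so that a Guest FPU access raises a device-not-available fault (trap 7) that can be passed on to the Guest; clear TS again after the switch; and if the exit was that trap 7 while the registers were not live, reload the state the Guest expects.  Below is a condensed, hypothetical restatement of that flow: lguest_fpu_pattern() and enter_guest_stub() are placeholders, while cpu->ts, cpu->regs->trapnum, stts()/clts(), fpregs_active() and fpu__restore() come from the patch itself.

/*
 * Condensed sketch of the pattern the hunks above implement in
 * lguest_arch_run_guest().  lguest_fpu_pattern() and enter_guest_stub()
 * are hypothetical; everything else is taken from the patch.
 */
#include <linux/sched.h>
#include <asm/fpu/internal.h>
#include "../lg.h"                      /* struct lg_cpu, as in core.c */

static void enter_guest_stub(struct lg_cpu *cpu);   /* stands in for the real switcher call */

static void lguest_fpu_pattern(struct lg_cpu *cpu)
{
        /*
         * The Guest has TS set and this task's FPU registers are live:
         * set the real TS bit so a Guest FPU access traps, and the trap
         * can be passed on to the Guest.
         */
        if (cpu->ts && fpregs_active())
                stts();

        enter_guest_stub(cpu);

        /* Clear the host TS bit if it was set above. */
        if (cpu->ts && fpregs_active())
                clts();

        /*
         * The Guest used the FPU (trap 7) while this task's FPU registers
         * were not loaded: restore the state the Guest expects to see.
         * fpu__restore() may sleep, so it comes after the critical work.
         */
        if (cpu->regs->trapnum == 7 && !fpregs_active())
                fpu__restore(&current->thread.fpu);
}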