path: root/arch/x86/kernel/fpu/xsave.c
author    Ingo Molnar <mingo@kernel.org>    2015-04-23 12:49:20 +0200
committer Ingo Molnar <mingo@kernel.org>    2015-05-19 15:47:25 +0200
commit    c5bedc6847c3be6efe0e671a6155c9a25fd468bf (patch)
tree      f1eb66ea2807447c95e915f281d6815578b7e627 /arch/x86/kernel/fpu/xsave.c
parent    af7f8721f1f1252473b154c38dd7583abfe3206b (diff)
x86/fpu: Get rid of PF_USED_MATH usage, convert it to fpu->fpstate_active
Introduce a simple fpu->fpstate_active flag in the fpu context data structure and use that instead of PF_USED_MATH in task->flags.

Testing for this flag byte should be slightly more efficient than testing a bit in a bitmask, but the main advantage is that most FPU functions can now be performed on a 'struct fpu' alone, they don't need access to 'struct task_struct' anymore.

There's a slight linecount increase, mostly due to the 'fpu' local variables and due to extra comments. The local variables will go away once we move most of the FPU methods to pure 'struct fpu' parameters.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
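To make the shape of the conversion concrete, here is a minimal userspace sketch of the before/after test. The struct layouts are simplified stand-ins, not the kernel's real definitions; only the field names from the commit are real, and the PF_USED_MATH value is illustrative:

    #include <stdio.h>

    #define PF_USED_MATH 0x00002000         /* old: one bit in a shared bitmask */

    struct fpu {
            unsigned char fpstate_active;   /* new: a dedicated flag byte */
    };

    struct thread_struct { struct fpu fpu; };

    struct task_struct {
            unsigned long flags;            /* PF_* bits live here */
            struct thread_struct thread;
    };

    /* Old style: even pure FPU code needs the whole task_struct to test one bit. */
    static int used_math_old(const struct task_struct *tsk)
    {
            return !!(tsk->flags & PF_USED_MATH);
    }

    /* New style: the same question answered from a 'struct fpu' alone. */
    static int used_math_new(const struct fpu *fpu)
    {
            return fpu->fpstate_active;
    }

    int main(void)
    {
            struct task_struct tsk = { .flags = PF_USED_MATH };
            tsk.thread.fpu.fpstate_active = 1;
            printf("old: %d new: %d\n", used_math_old(&tsk),
                   used_math_new(&tsk.thread.fpu));
            return 0;
    }

The payoff is visible in the signatures: used_math_new() takes only a 'struct fpu *', which is exactly the decoupling the commit message describes.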
Diffstat (limited to 'arch/x86/kernel/fpu/xsave.c')
-rw-r--r--  arch/x86/kernel/fpu/xsave.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 8cd127049c9b..dc346e19c0df 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -334,6 +334,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 {
 	int ia32_fxstate = (buf != buf_fx);
 	struct task_struct *tsk = current;
+	struct fpu *fpu = &tsk->thread.fpu;
 	int state_size = xstate_size;
 	u64 xstate_bv = 0;
 	int fx_only = 0;
@@ -349,7 +350,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!(tsk->flags & PF_USED_MATH) && fpstate_alloc_init(tsk))
+	if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
 		return -1;
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
@@ -384,12 +385,12 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		int err = 0;
 
 		/*
-		 * Drop the current fpu which clears PF_USED_MATH. This ensures
+		 * Drop the current fpu which clears fpu->fpstate_active. This ensures
 		 * that any context-switch during the copy of the new state,
 		 * avoids the intermediate state from getting restored/saved.
 		 * Thus avoiding the new restored state from getting corrupted.
 		 * We will be ready to restore/save the state only after
-		 * PF_USED_MATH is again set.
+		 * fpu->fpstate_active is again set.
 		 */
 		drop_fpu(tsk);
 
@@ -401,7 +402,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
 		}
 
-		tsk->flags |= PF_USED_MATH;
+		fpu->fpstate_active = 1;
 		if (use_eager_fpu()) {
 			preempt_disable();
 			fpu__restore();
@@ -685,7 +686,7 @@ void xsave_init(void)
  */
 void __init_refok eager_fpu_init(void)
 {
-	WARN_ON(current->flags & PF_USED_MATH);
+	WARN_ON(current->thread.fpu.fpstate_active);
 	current_thread_info()->status = 0;
 
 	if (eagerfpu == ENABLE)
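The comment in the third hunk above describes an ordering protocol: deactivate, copy, reactivate. A toy userspace model of that protocol is sketched below; every name here is a simplified stand-in (including the context-switch model), not the kernel's code, and it only illustrates why a context switch during the copy is harmless once the flag is cleared:

    #include <stdio.h>
    #include <string.h>

    struct fpu {
            unsigned char fpstate_active;
            unsigned char regs[8];  /* stand-in for the saved FPU register image */
    };

    /* In this model, a context switch saves the task's register image only
     * while fpstate_active is set -- the invariant the comment relies on. */
    static void context_switch_save(struct fpu *fpu, const unsigned char *hw)
    {
            if (fpu->fpstate_active)        /* inactive state is left alone */
                    memcpy(fpu->regs, hw, sizeof(fpu->regs));
    }

    int main(void)
    {
            struct fpu fpu = { .fpstate_active = 1 };
            unsigned char hw_regs[8]   = { 0xAA }; /* stale hardware contents   */
            unsigned char new_state[8] = { 0x55 }; /* state from the signal frame */

            fpu.fpstate_active = 0;              /* 1) drop_fpu(): deactivate first */
            memcpy(fpu.regs, new_state, 8);      /* 2) copy in the new state        */
            context_switch_save(&fpu, hw_regs);  /* preemption here cannot clobber it */
            fpu.fpstate_active = 1;              /* 3) only now mark the state valid  */

            printf("regs[0] = 0x%02x (0x55 expected, not 0xaa)\n", fpu.regs[0]);
            return 0;
    }

If step 1 were skipped, the simulated context switch would overwrite the half-written image with stale hardware contents, which is exactly the corruption the kernel comment warns about.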