author    Ingo Molnar <mingo@kernel.org>  2015-04-27 07:18:17 +0200
committer Ingo Molnar <mingo@kernel.org>  2015-05-19 15:47:51 +0200
commit    c4d72e2db3a36bf560b506df8a3490f140aeae26 (patch)
tree      934cc8c80417fb8a5845a3d5f5da1dc4fc819278
parent    0ee6a5172573aea06ef41f4e48737dcfab0099bb (diff)
x86/fpu: Simplify fpstate_init_curr() usage
Now that fpstate_init_curr() is not doing implicit allocations anymore,
almost all uses of it involve a very simple pattern:

	if (!fpu->fpstate_active)
		fpstate_init_curr(fpu);

which is basically activating the FPU fpstate if it was not active
before.

So propagate the check into the function itself, and rename the
function according to its new purpose:

	fpu__activate_curr(fpu);

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
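For readers outside the kernel tree, the sketch below is a minimal, self-contained
user-space illustration of the same refactoring: the "initialize only on first use"
guard is pushed from every call site into the callee. The struct and function names
are hypothetical stand-ins, not the kernel's API.

	/*
	 * Minimal sketch (not kernel code) of moving an activation guard
	 * into the callee; all names here are illustrative only.
	 */
	#include <stdio.h>
	#include <string.h>

	struct fpu_state {
		int  fpstate_active;	/* 0 until the state is first used */
		char regs[64];		/* stand-in for the real register image */
	};

	/* Old shape: every caller had to open-code the guard before calling this. */
	static void state_init(struct fpu_state *s)
	{
		memset(s->regs, 0, sizeof(s->regs));
		s->fpstate_active = 1;
	}

	/* New shape: the guard lives in the callee, so callers simply "activate". */
	static void state_activate(struct fpu_state *s)
	{
		if (!s->fpstate_active)
			state_init(s);
	}

	int main(void)
	{
		struct fpu_state s = { 0 };

		state_activate(&s);	/* first call initializes the state */
		state_activate(&s);	/* second call is a no-op */
		printf("active=%d\n", s.fpstate_active);
		return 0;
	}

The design benefit mirrors the patch: callers no longer need to know about the
activation state, so the check cannot be forgotten at any individual call site.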
-rw-r--r--  arch/x86/include/asm/fpu/internal.h |  2
-rw-r--r--  arch/x86/kernel/fpu/core.c          | 21
-rw-r--r--  arch/x86/kernel/fpu/xsave.c         |  3
-rw-r--r--  arch/x86/kvm/x86.c                  |  3
-rw-r--r--  arch/x86/math-emu/fpu_entry.c       |  3
5 files changed, 15 insertions, 17 deletions
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 1345ab3dd273..de19fc53f54e 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
-extern void fpstate_init_curr(struct fpu *fpu);
+extern void fpu__activate_curr(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 97b4f9e76f60..74cc32265918 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -259,19 +259,21 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 /*
- * Initialize the current task's in-memory FPU context:
+ * Activate the current task's in-memory FPU context,
+ * if it has not been used before:
  */
-void fpstate_init_curr(struct fpu *fpu)
+void fpu__activate_curr(struct fpu *fpu)
 {
 	WARN_ON_ONCE(fpu != &current->thread.fpu);
-	WARN_ON_ONCE(fpu->fpstate_active);
-	fpstate_init(fpu);
+	if (!fpu->fpstate_active) {
+		fpstate_init(fpu);
-	/* Safe to do for the current task: */
-	fpu->fpstate_active = 1;
+		/* Safe to do for the current task: */
+		fpu->fpstate_active = 1;
+	}
 }
-EXPORT_SYMBOL_GPL(fpstate_init_curr);
+EXPORT_SYMBOL_GPL(fpu__activate_curr);
/*
* This function is called before we modify a stopped child's
@@ -325,8 +327,7 @@ void fpu__restore(void)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
@@ -352,7 +353,7 @@ void fpu__clear(struct task_struct *tsk)
 		drop_fpu(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
-			fpstate_init_curr(fpu);
+			fpu__activate_curr(fpu);
 			user_fpu_begin();
 		}
 		restore_init_xstate();
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 49d9f3dcc2ea..f549e2a44336 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -358,8 +358,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 92a8490cc69d..9ff4df77e069 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6601,8 +6601,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 4c6ab791d0e5..5b850514eb68 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -149,8 +149,7 @@ void math_emulate(struct math_emu_info *info)
 	struct desc_struct code_descriptor;
 	struct fpu *fpu = &current->thread.fpu;
-	if (!fpu->fpstate_active)
-		fpstate_init_curr(fpu);
+	fpu__activate_curr(fpu);
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {