
x86/fpu: Factor out fpu__flush_thread() from flush_thread()

flush_thread() open-codes a lot of FPU internals - create a separate
function for them in fpu/core.c.

Turns out that this does not hurt performance:

      text     data      bss       dec     hex filename
  11843039  1884440  1130496  14857975  e2b6f7 vmlinux.before
  11843039  1884440  1130496  14857975  e2b6f7 vmlinux.after

and since this is a slowpath, clarity comes first anyway.

We can reconsider inlining decisions after the FPU code has been cleaned up.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 10 years ago
commit 81683cc827

3 changed files with 19 additions and 13 deletions:
  arch/x86/include/asm/i387.h  (+1, -0)
  arch/x86/kernel/fpu/core.c   (+17, -0)
  arch/x86/kernel/process.c    (+1, -13)

arch/x86/include/asm/i387.h  (+1, -0)

@@ -20,6 +20,7 @@ struct user_i387_struct;
 
 extern int fpstate_alloc_init(struct task_struct *curr);
 extern void fpstate_init(struct fpu *fpu);
+extern void fpu__flush_thread(struct task_struct *tsk);
 
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 extern void math_state_restore(void);

arch/x86/kernel/fpu/core.c  (+17, -0)

@@ -227,6 +227,23 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 	return 0;
 }
 
+void fpu__flush_thread(struct task_struct *tsk)
+{
+	if (!use_eager_fpu()) {
+		/* FPU state will be reallocated lazily at the first use. */
+		drop_fpu(tsk);
+		fpstate_free(&tsk->thread.fpu);
+	} else {
+		if (!tsk_used_math(tsk)) {
+			/* kthread execs. TODO: cleanup this horror. */
+			if (WARN_ON(fpstate_alloc_init(tsk)))
+				force_sig(SIGKILL, tsk);
+			user_fpu_begin();
+		}
+		restore_init_xstate();
+	}
+}
+
 /*
  * The xstateregs_active() routine is the same as the fpregs_active() routine,
  * as the "regset->n" for the xstate regset will be updated based on the feature

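For readers skimming the hunk above, the new helper's two branches are sketched
below with interpretive comments. All identifiers (use_eager_fpu(), drop_fpu(),
fpstate_free(), tsk_used_math(), fpstate_alloc_init(), user_fpu_begin(),
restore_init_xstate()) are the kernel helpers already visible in the diff; the
comments are a gloss added here, not part of the commit.

void fpu__flush_thread(struct task_struct *tsk)
{
	if (!use_eager_fpu()) {
		/*
		 * Lazy FPU switching: drop this task's FPU state and free
		 * its state buffer; a fresh buffer is allocated lazily on
		 * the task's first FPU use after the exec.
		 */
		drop_fpu(tsk);
		fpstate_free(&tsk->thread.fpu);
	} else {
		/*
		 * Eager FPU switching: a task that has never used math
		 * (e.g. a kthread calling exec) has no state buffer yet,
		 * so allocate and initialize one here; if that fails the
		 * task is killed.  user_fpu_begin() makes the hardware
		 * FPU live for this task so the register load sticks.
		 */
		if (!tsk_used_math(tsk)) {
			if (WARN_ON(fpstate_alloc_init(tsk)))
				force_sig(SIGKILL, tsk);
			user_fpu_begin();
		}
		/* Load a clean init state into the FPU registers. */
		restore_init_xstate();
	}
}
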
arch/x86/kernel/process.c  (+1, -13)

@@ -146,19 +146,7 @@ void flush_thread(void)
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 
-	if (!use_eager_fpu()) {
-		/* FPU state will be reallocated lazily at the first use. */
-		drop_fpu(tsk);
-		fpstate_free(&tsk->thread.fpu);
-	} else {
-		if (!tsk_used_math(tsk)) {
-			/* kthread execs. TODO: cleanup this horror. */
-			if (WARN_ON(fpstate_alloc_init(tsk)))
-				force_sig(SIGKILL, tsk);
-			user_fpu_begin();
-		}
-		restore_init_xstate();
-	}
+	fpu__flush_thread(tsk);
 }
 
 static void hard_disable_TSC(void)