
Merge tag 'tip_x86_fpu' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/fpu

Pull x86/fpu updates from Borislav Petkov:

 "Three more cleanups/improvements to the FPU handling code. (Oleg Nesterov)"

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar, 10 years ago
commit ae486033b9
3 changed files with 14 additions and 17 deletions:
    arch/x86/kernel/i387.c     +6  -5
    arch/x86/kernel/process.c  +7  -0
    arch/x86/kernel/xsave.c    +1  -12

arch/x86/kernel/i387.c  (+6, -5)

@@ -41,8 +41,8 @@ void kernel_fpu_enable(void)
  * be set (so that the clts/stts pair does nothing that is
  * visible in the interrupted kernel thread).
  *
- * Except for the eagerfpu case when we return 1 unless we've already
- * been eager and saved the state in kernel_fpu_begin().
+ * Except for the eagerfpu case when we return true; in the likely case
+ * the thread has FPU but we are not going to set/clear TS.
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
@@ -50,7 +50,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
 		return false;
 
 	if (use_eager_fpu())
-		return __thread_has_fpu(current);
+		return true;
 
 	return !__thread_has_fpu(current) &&
 		(read_cr0() & X86_CR0_TS);
@@ -93,9 +93,10 @@ void __kernel_fpu_begin(void)
 
 	if (__thread_has_fpu(me)) {
 		__save_init_fpu(me);
-	} else if (!use_eager_fpu()) {
+	} else {
 		this_cpu_write(fpu_owner_task, NULL);
-		clts();
+		if (!use_eager_fpu())
+			clts();
 	}
 }
 EXPORT_SYMBOL(__kernel_fpu_begin);
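
For orientation: __kernel_fpu_begin()/__kernel_fpu_end() sit underneath the kernel_fpu_begin()/kernel_fpu_end() helpers that drivers call around SSE/AVX code. A minimal usage sketch, not part of this commit (the exact header providing the helpers has moved between kernel versions; <asm/i387.h> is assumed here for kernels of this vintage):

	#include <asm/i387.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

	static void do_simd_work(void)
	{
		kernel_fpu_begin();	/* disables preemption, then saves or drops
					 * the interrupted task's FPU state */

		/* ... SSE/AVX instructions may safely be used here ... */

		kernel_fpu_end();	/* restores state or sets TS again, depending
					 * on eager vs. lazy mode, then re-enables
					 * preemption */
	}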

arch/x86/kernel/process.c  (+7, -0)

@@ -130,6 +130,7 @@ void flush_thread(void)
 
 	flush_ptrace_hw_breakpoint(tsk);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+
 	drop_init_fpu(tsk);
 	/*
 	 * Free the FPU state for non xsave platforms. They get reallocated
@@ -137,6 +138,12 @@ void flush_thread(void)
 	 */
 	if (!use_eager_fpu())
 		free_thread_xstate(tsk);
+	else if (!used_math()) {
+		/* kthread execs. TODO: cleanup this horror. */
+		if (WARN_ON(init_fpu(current)))
+			force_sig(SIGKILL, current);
+		math_state_restore();
+	}
 }
 
 static void hard_disable_TSC(void)
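
The flush_thread() change handles a kernel thread that execs a user binary: such a task has no FPU state yet (used_math() is false), and with eagerfpu there is no lazy #NM trap later to create it, so the state has to be allocated and loaded here. A condensed sketch of the resulting FPU part of flush_thread(), restated from the hunk above for readability (the wrapper function is hypothetical; the calls are the ones in the diff):

	static void flush_thread_fpu(struct task_struct *tsk)
	{
		drop_init_fpu(tsk);		/* discard whatever state the task had */

		if (!use_eager_fpu()) {
			/* lazy FPU: the buffer is reallocated on the first #NM trap */
			free_thread_xstate(tsk);
		} else if (!used_math()) {
			/* eagerfpu + kthread exec: allocate and load init state now */
			if (WARN_ON(init_fpu(current)))
				force_sig(SIGKILL, current);
			math_state_restore();
		}
	}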

arch/x86/kernel/xsave.c  (+1, -12)

@@ -688,7 +688,7 @@ void eager_fpu_init(void)
 {
 	static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
 
-	clear_used_math();
+	WARN_ON(used_math());
 	current_thread_info()->status = 0;
 
 	if (eagerfpu == ENABLE)
@@ -703,17 +703,6 @@ void eager_fpu_init(void)
 		boot_func();
 		boot_func = NULL;
 	}
-
-	/*
-	 * This is same as math_state_restore(). But use_xsave() is
-	 * not yet patched to use math_state_restore().
-	 */
-	init_fpu(current);
-	__thread_fpu_begin(current);
-	if (cpu_has_xsave)
-		xrstor_state(init_xstate_buf, -1);
-	else
-		fxrstor_checking(&init_xstate_buf->i387);
 }
 
 /*
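
The block deleted at the end of eager_fpu_init() was, per its own comment, an open-coded equivalent of math_state_restore(). For reference, a rough from-memory sketch of what math_state_restore() amounted to in kernels of this era (error handling abbreviated; not part of this diff):

	void math_state_restore(void)		/* sketch, simplified */
	{
		struct task_struct *tsk = current;

		if (!tsk_used_math(tsk))
			init_fpu(tsk);			/* allocate + set up init xstate */

		__thread_fpu_begin(tsk);		/* this CPU now owns tsk's FPU */
		if (restore_fpu_checking(tsk))		/* xrstor/fxrstor from tsk's buffer */
			drop_init_fpu(tsk);		/* bad image: fall back to init state */
	}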