
Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
 "Sorry, meant to push out this batch earlier this weekend"

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu, amd: Clear exceptions in AMD FXSAVE workaround
  ftrace/x86: Load ftrace_ops in parameter not the variable holding it
Linus Torvalds, 11 years ago
commit 061f49ec2d

3 changed files with 10 additions and 9 deletions:
  1. arch/x86/include/asm/fpu-internal.h  (+7 -6)
  2. arch/x86/kernel/entry_32.S  (+2 -2)
  3. arch/x86/kernel/entry_64.S  (+1 -1)

+ 7 - 6
arch/x86/include/asm/fpu-internal.h

@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. "m" is a random variable that should be in L1 */
-	alternative_input(
-		ASM_NOP8 ASM_NOP2,
-		"emms\n\t"		/* clear stack tags */
-		"fildl %P[addr]",	/* set F?P to defined value */
-		X86_FEATURE_FXSAVE_LEAK,
-		[addr] "m" (tsk->thread.fpu.has_fpu));
+	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+		asm volatile(
+			"fnclex\n\t"
+			"emms\n\t"
+			"fildl %P[addr]"	/* set F?P to defined value */
+			: : [addr] "m" (tsk->thread.fpu.has_fpu));
+	}
 
 	return fpu_restore_checking(&tsk->thread.fpu);
 }
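
The FXSAVE-leak fix adds an fnclex ahead of the fildl: if an unmasked x87
exception is still pending when this path runs, the fildl used to put
FIP/FDP/FOP into a defined state would itself trap, so pending exceptions are
cleared first. A minimal userspace sketch of the same instruction sequence
(illustration only, not kernel code; the local variable "seed" stands in for
tsk->thread.fpu.has_fpu):

	/* Illustration only: fnclex/emms/fildl as in the workaround above.
	 * Assumes an x86 build with GCC/Clang inline asm. */
	#include <stdio.h>

	int main(void)
	{
		int seed = 1;	/* stand-in for tsk->thread.fpu.has_fpu */

		asm volatile(
			"fnclex\n\t"	/* clear pending x87 exceptions first */
			"emms\n\t"	/* clear the FPU stack tags */
			"fildl %[addr]"	/* load an int so FIP/FDP/FOP get defined values */
			: : [addr] "m" (seed));

		puts("x87 state reset without faulting");
		return 0;
	}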

+ 2 - 2
arch/x86/kernel/entry_32.S

@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
 	pushl $0	/* Pass NULL as regs pointer */
 	movl 4*4(%esp), %eax
 	movl 0x4(%ebp), %edx
-	leal function_trace_op, %ecx
+	movl function_trace_op, %ecx
 	subl $MCOUNT_INSN_SIZE, %eax
 
 .globl ftrace_call
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller)
 	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
 	subl $MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
 	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
-	leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+	movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
 	pushl %esp		/* Save pt_regs as 4th parameter */
 
 GLOBAL(ftrace_regs_call)

+ 1 - 1
arch/x86/kernel/entry_64.S

@@ -88,7 +88,7 @@ END(function_hook)
 	MCOUNT_SAVE_FRAME \skip
 
 	/* Load the ftrace_ops into the 3rd parameter */
-	leaq function_trace_op, %rdx
+	movq function_trace_op(%rip), %rdx
 
 	/* Load ip into the first parameter */
 	movq RIP(%rsp), %rdi
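
Both trampolines were loading the address of the function_trace_op variable
(leal/leaq) into the third-parameter register, while the ftrace callback
expects the ftrace_ops pointer that the variable holds; switching to
movl/movq passes its value instead. A rough C analogue of the difference
(illustration only; the stub type below is not the kernel's):

	/* Illustration only: function_trace_op holds a pointer to the active
	 * ftrace_ops, so callers must pass its value, not its address. */
	#include <stddef.h>

	struct ftrace_ops { int stub; };	/* stand-in, not the real struct */

	static struct ftrace_ops some_ops;
	static struct ftrace_ops *function_trace_op = &some_ops;

	static void callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, void *regs)
	{
		(void)ip; (void)parent_ip; (void)op; (void)regs;
	}

	int main(void)
	{
		/* What leal/leaq effectively passed: &function_trace_op,
		 * a pointer to the variable, i.e. the wrong type entirely. */
		/* callback(0, 0, (struct ftrace_ops *)&function_trace_op, NULL); */

		/* What movl/movq passes: the ftrace_ops pointer itself. */
		callback(0, 0, function_trace_op, NULL);
		return 0;
	}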