Browse code

x86/debug: Drop several unnecessary CFI annotations

With the conversion of the register saving code from macros to
functions, and with those functions not clobbering most of the
registers they spill, there's no need to annotate most of the
spill operations; the only exceptions being %rbx (always
modified) and %rcx (modified on the error_kernelspace: path).

Also remove a bogus commented out annotation - there's no
register %orig_rax after all.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Link: http://lkml.kernel.org/r/53AAE69A020000780001D3C7@mail.emea.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Jan Beulich, 11 years ago
Parent
Current commit
3bab13b015
1 file changed, 26 insertions and 26 deletions
  1. 26 insertions, 26 deletions:
      arch/x86/kernel/entry_64.S

+ 26 - 26
arch/x86/kernel/entry_64.S

@@ -207,7 +207,6 @@ ENDPROC(native_usergs_sysret64)
  */
  */
 	.macro XCPT_FRAME start=1 offset=0
 	.macro XCPT_FRAME start=1 offset=0
 	INTR_FRAME \start, RIP+\offset-ORIG_RAX
 	INTR_FRAME \start, RIP+\offset-ORIG_RAX
-	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
 	.endm
 	.endm
 
 
 /*
 /*
@@ -287,21 +286,21 @@ ENDPROC(native_usergs_sysret64)
 ENTRY(save_paranoid)
 ENTRY(save_paranoid)
 	XCPT_FRAME 1 RDI+8
 	XCPT_FRAME 1 RDI+8
 	cld
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
 	movq_cfi rdx, RDX+8
 	movq_cfi rdx, RDX+8
 	movq_cfi rcx, RCX+8
 	movq_cfi rcx, RCX+8
 	movq_cfi rax, RAX+8
 	movq_cfi rax, RAX+8
-	movq_cfi r8, R8+8
-	movq_cfi r9, R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %r8, R8+8(%rsp)
+	movq %r9, R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	movl $1,%ebx
 	movl $1,%ebx
 	movl $MSR_GS_BASE,%ecx
 	movl $MSR_GS_BASE,%ecx
 	rdmsr
 	rdmsr
@@ -1395,21 +1394,21 @@ ENTRY(error_entry)
 	CFI_ADJUST_CFA_OFFSET 15*8
 	CFI_ADJUST_CFA_OFFSET 15*8
 	/* oldrax contains error code */
 	/* oldrax contains error code */
 	cld
 	cld
-	movq_cfi rdi, RDI+8
-	movq_cfi rsi, RSI+8
-	movq_cfi rdx, RDX+8
-	movq_cfi rcx, RCX+8
-	movq_cfi rax, RAX+8
-	movq_cfi  r8,  R8+8
-	movq_cfi  r9,  R9+8
-	movq_cfi r10, R10+8
-	movq_cfi r11, R11+8
+	movq %rdi, RDI+8(%rsp)
+	movq %rsi, RSI+8(%rsp)
+	movq %rdx, RDX+8(%rsp)
+	movq %rcx, RCX+8(%rsp)
+	movq %rax, RAX+8(%rsp)
+	movq  %r8,  R8+8(%rsp)
+	movq  %r9,  R9+8(%rsp)
+	movq %r10, R10+8(%rsp)
+	movq %r11, R11+8(%rsp)
 	movq_cfi rbx, RBX+8
 	movq_cfi rbx, RBX+8
-	movq_cfi rbp, RBP+8
-	movq_cfi r12, R12+8
-	movq_cfi r13, R13+8
-	movq_cfi r14, R14+8
-	movq_cfi r15, R15+8
+	movq %rbp, RBP+8(%rsp)
+	movq %r12, R12+8(%rsp)
+	movq %r13, R13+8(%rsp)
+	movq %r14, R14+8(%rsp)
+	movq %r15, R15+8(%rsp)
 	xorl %ebx,%ebx
 	xorl %ebx,%ebx
 	testl $3,CS+8(%rsp)
 	testl $3,CS+8(%rsp)
 	je error_kernelspace
 	je error_kernelspace
@@ -1427,6 +1426,7 @@ error_sti:
  * compat mode. Check for these here too.
  * compat mode. Check for these here too.
  */
  */
 error_kernelspace:
 error_kernelspace:
+	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	incl %ebx
 	leaq irq_return_iret(%rip),%rcx
 	leaq irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
 	cmpq %rcx,RIP+8(%rsp)