
x86/entry/32, x86/boot/32: Use local labels

Add the local label prefix to all non-function named labels in head_32.S
and entry_32.S.  In addition to decluttering the symbol table, it also
makes stack traces more sensible.  For example, the last
reported function in the idle task stack trace will be startup_32_smp()
instead of is486().
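
For background: the GNU assembler treats any symbol starting with the ".L"
prefix as an assembler-local label, resolved at assembly time and never
emitted into the object file's symbol table, so tools that map addresses to
the nearest symbol (nm, objdump, the kernel's stack trace code) stop
attributing code to such labels.  A minimal standalone sketch, with
hypothetical labels that are not part of this patch:

	/* demo.S - local-label sketch; assemble with: as --32 -o demo.o demo.S */
	.text
	.globl	my_func
my_func:
global_loop:			# plain label: emitted into the symbol table, so
				# an unwinder may report it as a "function"
	decl	%eax
	jnz	global_loop
.Llocal_loop:			# .L prefix: local to this object, not emitted,
				# so the nearest symbol remains my_func
	decl	%edx
	jnz	.Llocal_loop
	ret

Running "nm demo.o" would list my_func and global_loop but no .Llocal_loop;
the same effect is why the idle task's stack trace can end at
startup_32_smp() rather than at the non-function label is486().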

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/14f9f7afd478b23a762f40734da1a57c0c273f6e.1474480779.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Josh Poimboeuf committed 9 years ago
commit 1b00255f32
2 changed files with 38 additions and 37 deletions:
  1. arch/x86/entry/entry_32.S (+22 -21)
  2. arch/x86/kernel/head_32.S (+16 -16)

arch/x86/entry/entry_32.S (+22 -21)

@@ -307,13 +307,13 @@ END(ret_from_exception)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	restore_all
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz	restore_all
 	call	preempt_schedule_irq
-	jmp	need_resched
+	jmp	.Lneed_resched
 END(resume_kernel)
 #endif
 
@@ -334,7 +334,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
  */
 ENTRY(xen_sysenter_target)
 	addl	$5*4, %esp			/* remove xen-provided frame */
-	jmp	sysenter_past_esp
+	jmp	.Lsysenter_past_esp
 #endif
 
 /*
@@ -371,7 +371,7 @@ ENTRY(xen_sysenter_target)
  */
 ENTRY(entry_SYSENTER_32)
 	movl	TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
 	pushl	$__USER_DS		/* pt_regs->ss */
 	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
 	pushfl				/* pt_regs->flags (except IF = 0) */
@@ -504,9 +504,9 @@ ENTRY(entry_INT80_32)
 
 restore_all:
 	TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
 #ifdef CONFIG_X86_ESPFIX32
-	ALTERNATIVE	"jmp restore_nocheck", "", X86_BUG_ESPFIX
+	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
 
 	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
 	/*
@@ -518,22 +518,23 @@ restore_all_notrace:
 	movb	PT_CS(%esp), %al
 	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
-	je ldt_ss				# returning to user-space with LDT SS
+	je .Lldt_ss				# returning to user-space with LDT SS
 #endif
-restore_nocheck:
+.Lrestore_nocheck:
 	RESTORE_REGS 4				# skip orig_eax/error_code
-irq_return:
+.Lirq_return:
 	INTERRUPT_RETURN
+
 .section .fixup, "ax"
 ENTRY(iret_exc	)
 	pushl	$0				# no error code
 	pushl	$do_iret_error
 	jmp	error_code
 .previous
-	_ASM_EXTABLE(irq_return, iret_exc)
+	_ASM_EXTABLE(.Lirq_return, iret_exc)
 
 #ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
 /*
  * Setup and switch to ESPFIX stack
  *
@@ -562,7 +563,7 @@ ldt_ss:
 	 */
 	DISABLE_INTERRUPTS(CLBR_EAX)
 	lss	(%esp), %esp			/* switch to espfix segment */
-	jmp	restore_nocheck
+	jmp	.Lrestore_nocheck
 #endif
 ENDPROC(entry_INT80_32)
 
@@ -882,7 +883,7 @@ ftrace_call:
 	popl	%edx
 	popl	%ecx
 	popl	%eax
-ftrace_ret:
+.Lftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -952,7 +953,7 @@ GLOBAL(ftrace_regs_call)
 	popl	%gs
 	addl	$8, %esp			/* Skip orig_ax and ip */
 	popf					/* Pop flags at end (no addl to corrupt flags) */
-	jmp	ftrace_ret
+	jmp	.Lftrace_ret
 
 	popf
 	jmp	ftrace_stub
@@ -963,7 +964,7 @@ ENTRY(mcount)
 	jb	ftrace_stub			/* Paging not enabled yet? */
 
 	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	trace
+	jnz	.Ltrace
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	cmpl	$ftrace_stub, ftrace_graph_return
 	jnz	ftrace_graph_caller
@@ -976,7 +977,7 @@ ftrace_stub:
 	ret
 
 	/* taken from glibc */
-trace:
+.Ltrace:
 	pushl	%eax
 	pushl	%ecx
 	pushl	%edx
@@ -1116,7 +1117,7 @@ ENTRY(nmi)
 	movl	%ss, %eax
 	cmpw	$__ESPFIX_SS, %ax
 	popl	%eax
-	je	nmi_espfix_stack
+	je	.Lnmi_espfix_stack
 #endif
 
 	pushl	%eax				# pt_regs->orig_ax
@@ -1132,7 +1133,7 @@ ENTRY(nmi)
 
 	/* Not on SYSENTER stack. */
 	call	do_nmi
-	jmp	restore_all_notrace
+	jmp	.Lrestore_all_notrace
 
 .Lnmi_from_sysenter_stack:
 	/*
@@ -1143,10 +1144,10 @@ ENTRY(nmi)
 	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
 	call	do_nmi
 	movl	%ebp, %esp
-	jmp	restore_all_notrace
+	jmp	.Lrestore_all_notrace
 
 #ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
 	/*
 	 * create the pointer to lss back
 	 */
@@ -1164,7 +1165,7 @@ nmi_espfix_stack:
 	call	do_nmi
 	RESTORE_REGS
 	lss	12+4(%esp), %esp		# back to espfix stack
-	jmp	irq_return
+	jmp	.Lirq_return
 #endif
 END(nmi)
 

arch/x86/kernel/head_32.S (+16 -16)

@@ -248,19 +248,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 #ifdef CONFIG_PARAVIRT
 	/* This is can only trip for a broken bootloader... */
 	cmpw $0x207, pa(boot_params + BP_version)
-	jb default_entry
+	jb .Ldefault_entry
 
 	/* Paravirt-compatible boot parameters.  Look to see what architecture
 		we're booting under. */
 	movl pa(boot_params + BP_hardware_subarch), %eax
 	cmpl $num_subarch_entries, %eax
-	jae bad_subarch
+	jae .Lbad_subarch
 
 	movl pa(subarch_entries)(,%eax,4), %eax
 	subl $__PAGE_OFFSET, %eax
 	jmp *%eax
 
-bad_subarch:
+.Lbad_subarch:
 WEAK(lguest_entry)
 WEAK(xen_entry)
 	/* Unknown implementation; there's really
@@ -270,14 +270,14 @@ WEAK(xen_entry)
 	__INITDATA
 
 subarch_entries:
-	.long default_entry		/* normal x86/PC */
+	.long .Ldefault_entry		/* normal x86/PC */
 	.long lguest_entry		/* lguest hypervisor */
 	.long xen_entry			/* Xen hypervisor */
-	.long default_entry		/* Moorestown MID */
+	.long .Ldefault_entry		/* Moorestown MID */
 num_subarch_entries = (. - subarch_entries) / 4
 .previous
 #else
-	jmp default_entry
+	jmp .Ldefault_entry
 #endif /* CONFIG_PARAVIRT */
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -317,7 +317,7 @@ ENTRY(startup_32_smp)
 	call load_ucode_ap
 #endif
 
-default_entry:
+.Ldefault_entry:
 #define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
 			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
 			 X86_CR0_PG)
@@ -347,7 +347,7 @@ default_entry:
 	pushfl
 	popl %eax			# get EFLAGS
 	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remained set?
-	jz enable_paging		# hw disallowed setting of ID bit
+	jz .Lenable_paging		# hw disallowed setting of ID bit
 					# which means no CPUID and no CR4
 
 	xorl %eax,%eax
@@ -357,13 +357,13 @@ default_entry:
 	movl $1,%eax
 	cpuid
 	andl $~1,%edx			# Ignore CPUID.FPU
-	jz enable_paging		# No flags or only CPUID.FPU = no CR4
+	jz .Lenable_paging		# No flags or only CPUID.FPU = no CR4
 
 	movl pa(mmu_cr4_features),%eax
 	movl %eax,%cr4
 
 	testb $X86_CR4_PAE, %al		# check if PAE is enabled
-	jz enable_paging
+	jz .Lenable_paging
 
 	/* Check if extended functions are implemented */
 	movl $0x80000000, %eax
@@ -371,7 +371,7 @@ default_entry:
 	/* Value must be in the range 0x80000001 to 0x8000ffff */
 	subl $0x80000001, %eax
 	cmpl $(0x8000ffff-0x80000001), %eax
-	ja enable_paging
+	ja .Lenable_paging
 
 	/* Clear bogus XD_DISABLE bits */
 	call verify_cpu
@@ -380,7 +380,7 @@ default_entry:
 	cpuid
 	/* Execute Disable bit supported? */
 	btl $(X86_FEATURE_NX & 31), %edx
-	jnc enable_paging
+	jnc .Lenable_paging
 
 	/* Setup EFER (Extended Feature Enable Register) */
 	movl $MSR_EFER, %ecx
@@ -390,7 +390,7 @@ default_entry:
 	/* Make changes effective */
 	wrmsr
 
-enable_paging:
+.Lenable_paging:
 
 /*
  * Enable paging
@@ -419,7 +419,7 @@ enable_paging:
  */
 	movb $4,X86			# at least 486
 	cmpl $-1,X86_CPUID
-	je is486
+	je .Lis486
 
 	/* get vendor info */
 	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
@@ -430,7 +430,7 @@ enable_paging:
 	movl %ecx,X86_VENDOR_ID+8	# last 4 chars
 
 	orl %eax,%eax			# do we have processor info as well?
-	je is486
+	je .Lis486
 
 	movl $1,%eax		# Use the CPUID instruction to get CPU type
 	cpuid
@@ -444,7 +444,7 @@ enable_paging:
 	movb %cl,X86_MASK
 	movl %edx,X86_CAPABILITY
 
-is486:
+.Lis486:
 	movl $0x50022,%ecx	# set AM, WP, NE and MP
 	movl %cr0,%eax
 	andl $0x80000011,%eax	# Save PG,PE,ET