Merge remote-tracking branch 'anton/abiv2' into next

This series adds support for building the powerpc 64-bit
LE kernel using the new ABI v2. We already supported
running ABI v2 userspace programs but this adds support
for building the kernel itself using the new ABI.
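
Background for this change (an editorial note, not part of the original series description): under ABIv1 a 64-bit function symbol resolves to a function descriptor in the .opd section rather than to the code itself, while ABIv2 drops descriptors and gives each function a global and a local entry point. A minimal C sketch of the ABIv1 layout, assuming the ppc64_opd_entry structure declared in arch/powerpc/include/uapi/asm/elf.h (extended later in this diff):

struct ppc64_opd_entry {
	unsigned long funcaddr;	/* address of the function text */
	unsigned long r2;	/* TOC value the callee expects */
};	/* a third, unused doubleword follows */

/* ABIv1: a "function pointer" really points at a descriptor, so
 * reaching the code takes one extra dereference. Under ABIv2 the
 * pointer is the (global) entry point itself. */
static inline unsigned long abiv1_text_address(void *func)
{
	return ((struct ppc64_opd_entry *)func)->funcaddr;
}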
Benjamin Herrenschmidt 11 years ago
parent
commit
f6869e7fe6
55 changed files with 902 additions and 680 deletions
  1. arch/powerpc/Makefile (+8 -3)
  2. arch/powerpc/boot/util.S (+2 -2)
  3. arch/powerpc/include/asm/code-patching.h (+36 -4)
  4. arch/powerpc/include/asm/context_tracking.h (+2 -2)
  5. arch/powerpc/include/asm/exception-64e.h (+3 -3)
  6. arch/powerpc/include/asm/exception-64s.h (+1 -1)
  7. arch/powerpc/include/asm/ftrace.h (+2 -0)
  8. arch/powerpc/include/asm/irqflags.h (+4 -4)
  9. arch/powerpc/include/asm/kprobes.h (+3 -2)
  10. arch/powerpc/include/asm/linkage.h (+2 -0)
  11. arch/powerpc/include/asm/module.h (+4 -0)
  12. arch/powerpc/include/asm/ppc_asm.h (+38 -34)
  13. arch/powerpc/include/asm/sections.h (+2 -0)
  14. arch/powerpc/include/asm/systbl.h (+3 -3)
  15. arch/powerpc/include/uapi/asm/elf.h (+9 -1)
  16. arch/powerpc/kernel/cpu_setup_fsl_booke.S (+14 -14)
  17. arch/powerpc/kernel/entry_64.S (+58 -59)
  18. arch/powerpc/kernel/exceptions-64e.S (+70 -70)
  19. arch/powerpc/kernel/exceptions-64s.S (+103 -103)
  20. arch/powerpc/kernel/ftrace.c (+39 -98)
  21. arch/powerpc/kernel/head_64.S (+59 -58)
  22. arch/powerpc/kernel/idle_book3e.S (+1 -1)
  23. arch/powerpc/kernel/idle_power4.S (+1 -1)
  24. arch/powerpc/kernel/idle_power7.S (+2 -2)
  25. arch/powerpc/kernel/misc_64.S (+39 -7)
  26. arch/powerpc/kernel/module_64.c (+225 -54)
  27. arch/powerpc/kernel/process.c (+5 -12)
  28. arch/powerpc/kernel/prom_init_check.sh (+1 -1)
  29. arch/powerpc/kernel/setup_64.c (+1 -1)
  30. arch/powerpc/kernel/systbl.S (+11 -7)
  31. arch/powerpc/kernel/tm.S (+6 -7)
  32. arch/powerpc/kvm/book3s_hv_interrupts.S (+1 -1)
  33. arch/powerpc/kvm/book3s_hv_rmhandlers.S (+17 -17)
  34. arch/powerpc/lib/copypage_64.S (+1 -1)
  35. arch/powerpc/lib/copypage_power7.S (+6 -6)
  36. arch/powerpc/lib/copyuser_64.S (+1 -1)
  37. arch/powerpc/lib/copyuser_power7.S (+16 -16)
  38. arch/powerpc/lib/hweight_64.S (+4 -4)
  39. arch/powerpc/lib/mem_64.S (+2 -2)
  40. arch/powerpc/lib/memcpy_64.S (+5 -5)
  41. arch/powerpc/lib/memcpy_power7.S (+13 -13)
  42. arch/powerpc/mm/hash_low_64.S (+28 -16)
  43. arch/powerpc/mm/hash_utils_64.c (+16 -20)
  44. arch/powerpc/mm/slb.c (+6 -6)
  45. arch/powerpc/mm/slb_low.S (+8 -4)
  46. arch/powerpc/platforms/85xx/smp.c (+2 -1)
  47. arch/powerpc/platforms/cell/smp.c (+3 -2)
  48. arch/powerpc/platforms/pasemi/powersave.S (+1 -1)
  49. arch/powerpc/platforms/powernv/opal-takeover.S (+2 -0)
  50. arch/powerpc/platforms/powernv/opal-wrappers.S (+2 -2)
  51. arch/powerpc/platforms/powernv/smp.c (+3 -2)
  52. arch/powerpc/platforms/pseries/hvCall.S (+2 -2)
  53. arch/powerpc/platforms/pseries/smp.c (+3 -2)
  54. arch/powerpc/platforms/wsp/scom_smp.c (+2 -1)
  55. tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h (+4 -1)

+ 8 - 3
arch/powerpc/Makefile

@@ -113,8 +113,13 @@ else
 endif
 endif
 
-CFLAGS-$(CONFIG_PPC64)	:= -mtraceback=no -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv1)
+CFLAGS-$(CONFIG_PPC64)	:= -mtraceback=no
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2,-mcall-aixdesc)
+AFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mabi=elfv2)
+else
+CFLAGS-$(CONFIG_PPC64)	+= -mcall-aixdesc
+endif
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mcmodel=medium,-mminimal-toc)
 CFLAGS-$(CONFIG_PPC64)	+= $(call cc-option,-mno-pointers-to-nested-functions)
 CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 $(MULTIPLEWORD)
@@ -151,7 +156,7 @@ endif
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
 KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
-KBUILD_AFLAGS	+= -Iarch/$(ARCH)
+KBUILD_AFLAGS	+= -Iarch/$(ARCH) $(AFLAGS-y)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP		= $(CC) -E $(KBUILD_CFLAGS)
 

+ 2 - 2
arch/powerpc/boot/util.S

@@ -45,7 +45,7 @@ udelay:
 	mfspr	r4,SPRN_PVR
 	srwi	r4,r4,16
 	cmpwi	0,r4,1		/* 601 ? */
-	bne	.udelay_not_601
+	bne	.Ludelay_not_601
 00:	li	r0,86	/* Instructions / microsecond? */
 	mtctr	r0
 10:	addi	r0,r0,0 /* NOP */
@@ -54,7 +54,7 @@ udelay:
 	bne	00b
 	blr
 
-.udelay_not_601:
+.Ludelay_not_601:
 	mulli	r4,r3,1000	/* nanoseconds */
 	/*  Change r4 to be the number of ticks using:
 	 *	(nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns

+ 36 - 4
arch/powerpc/include/asm/code-patching.h

@@ -42,15 +42,47 @@ void __patch_exception(int exc, unsigned long addr);
 } while (0)
 #endif
 
+#define OP_RT_RA_MASK	0xffff0000UL
+#define LIS_R2		0x3c020000UL
+#define ADDIS_R2_R12	0x3c4c0000UL
+#define ADDI_R2_R2	0x38420000UL
+
 static inline unsigned long ppc_function_entry(void *func)
 {
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+	u32 *insn = func;
+
+	/*
+	 * A PPC64 ABIv2 function may have a local and a global entry
+	 * point. We need to use the local entry point when patching
+	 * functions, so identify and step over the global entry point
+	 * sequence.
+	 *
+	 * The global entry point sequence is always of the form:
+	 *
+	 * addis r2,r12,XXXX
+	 * addi  r2,r2,XXXX
+	 *
+	 * A linker optimisation may convert the addis to lis:
+	 *
+	 * lis   r2,XXXX
+	 * addi  r2,r2,XXXX
+	 */
+	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
+	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
+	    ((*(insn+1) & OP_RT_RA_MASK) == ADDI_R2_R2))
+		return (unsigned long)(insn + 2);
+	else
+		return (unsigned long)func;
+#else
 	/*
-	 * On PPC64 the function pointer actually points to the function's
-	 * descriptor. The first entry in the descriptor is the address
-	 * of the function text.
+	 * On PPC64 ABIv1 the function pointer actually points to the
+	 * function's descriptor. The first entry in the descriptor is the
+	 * address of the function text.
 	 */
 	return ((func_descr_t *)func)->entry;
+#endif
 #else
 	return (unsigned long)func;
 #endif
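
Below, a self-contained userspace sketch of this check (a hypothetical test harness, not kernel code): it encodes the two-instruction global entry sequence and applies the same opcode masks that ppc_function_entry() uses to locate the local entry point.

#include <stdio.h>
#include <stdint.h>

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c020000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL

/* Skip a leading "addis r2,r12,X; addi r2,r2,Y" (or the linker-
 * optimised "lis r2,X; addi r2,r2,Y") global entry sequence,
 * exactly as the kernel helper above does. */
static uint32_t *local_entry(uint32_t *insn)
{
	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return insn + 2;
	return insn;
}

int main(void)
{
	/* addis r2,r12,1 ; addi r2,r2,2 ; nop */
	uint32_t seq[3] = { 0x3c4c0001, 0x38420002, 0x60000000 };

	printf("skipped %td instructions\n", local_entry(seq) - seq);	/* 2 */
	return 0;
}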

+ 2 - 2
arch/powerpc/include/asm/context_tracking.h

@@ -2,9 +2,9 @@
 #define _ASM_POWERPC_CONTEXT_TRACKING_H
 
 #ifdef CONFIG_CONTEXT_TRACKING
-#define SCHEDULE_USER bl	.schedule_user
+#define SCHEDULE_USER bl	schedule_user
 #else
-#define SCHEDULE_USER bl	.schedule
+#define SCHEDULE_USER bl	schedule
 #endif
 
 #endif

+ 3 - 3
arch/powerpc/include/asm/exception-64e.h

@@ -174,10 +174,10 @@ exc_##label##_book3e:
 	mtlr	r16;
 #define TLB_MISS_STATS_D(name)						    \
 	addi	r9,r13,MMSTAT_DSTATS+name;				    \
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_I(name)						    \
 	addi	r9,r13,MMSTAT_ISTATS+name;				    \
-	bl	.tlb_stat_inc;
+	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_X(name)						    \
 	ld	r8,PACA_EXTLB+EX_TLB_ESR(r13);				    \
 	cmpdi	cr2,r8,-1;						    \
@@ -185,7 +185,7 @@ exc_##label##_book3e:
 	addi	r9,r13,MMSTAT_DSTATS+name;				    \
 	b	62f;							    \
 61:	addi	r9,r13,MMSTAT_ISTATS+name;				    \
-62:	bl	.tlb_stat_inc;
+62:	bl	tlb_stat_inc;
 #define TLB_MISS_STATS_SAVE_INFO					    \
 	std	r14,EX_TLB_ESR(r12);	/* save ESR */
 #define TLB_MISS_STATS_SAVE_INFO_BOLTED					    \

+ 1 - 1
arch/powerpc/include/asm/exception-64s.h

@@ -517,7 +517,7 @@ label##_relon_hv:							\
 #define DISABLE_INTS	RECONCILE_IRQ_STATE(r10,r11)
 
 #define ADD_NVGPRS				\
-	bl	.save_nvgprs
+	bl	save_nvgprs
 
 #define RUNLATCH_ON				\
 BEGIN_FTR_SECTION				\

+ 2 - 0
arch/powerpc/include/asm/ftrace.h

@@ -61,6 +61,7 @@ struct dyn_arch_ftrace {
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 {
@@ -72,6 +73,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 	 */
 	return !strcmp(sym + 4, name + 3);
 }
+#endif
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_FTRACE */
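
To illustrate why this helper exists only for ABIv1 (a hypothetical standalone test; the ".SyS" symbol form is an assumption based on how ABIv1 kallsyms reports dot symbols): the comparison skips the four-character dotted prefix in the symbol and the three-character "sys" prefix in the syscall name. ABIv2 has no dot symbols, so the generic matcher is used instead.

#include <stdio.h>
#include <string.h>

/* Same comparison as arch_syscall_match_sym_name() above. */
static int match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym + 4, name + 3);	/* "_read" vs "_read" */
}

int main(void)
{
	printf("%d\n", match_sym_name(".SyS_read", "sys_read"));	/* 1 */
	printf("%d\n", match_sym_name(".SyS_read", "sys_write"));	/* 0 */
	return 0;
}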

+ 4 - 4
arch/powerpc/include/asm/irqflags.h

@@ -20,9 +20,9 @@
  */
 #define TRACE_WITH_FRAME_BUFFER(func)		\
 	mflr	r0;				\
-	stdu	r1, -32(r1);			\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
 	std	r0, 16(r1);			\
-	stdu	r1, -32(r1);			\
+	stdu	r1, -STACK_FRAME_OVERHEAD(r1);	\
 	bl func;				\
 	ld	r1, 0(r1);			\
 	ld	r1, 0(r1);
@@ -36,8 +36,8 @@
  * have to call a C function so call a wrapper that saves all the
  * C-clobbered registers.
  */
-#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
-#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(trace_hardirqs_off)
 
 /*
  * This is used by assembly code to soft-disable interrupts first and

+ 3 - 2
arch/powerpc/include/asm/kprobes.h

@@ -30,6 +30,7 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 #include <asm/probes.h>
+#include <asm/code-patching.h>
 
 #define  __ARCH_WANT_KPROBES_INSN_SLOT
 
@@ -56,9 +57,9 @@ typedef ppc_opcode_t kprobe_opcode_t;
 		if ((colon = strchr(name, ':')) != NULL) {		\
 			colon++;					\
 			if (*colon != '\0' && *colon != '.')		\
-				addr = *(kprobe_opcode_t **)addr;	\
+				addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
 		} else if (name[0] != '.')				\
-			addr = *(kprobe_opcode_t **)addr;		\
+			addr = (kprobe_opcode_t *)ppc_function_entry(addr); \
 	} else {							\
 		char dot_name[KSYM_NAME_LEN];				\
 		dot_name[0] = '.';					\

+ 2 - 0
arch/powerpc/include/asm/linkage.h

@@ -2,6 +2,7 @@
 #define _ASM_POWERPC_LINKAGE_H
 
 #ifdef CONFIG_PPC64
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #define cond_syscall(x) \
 	asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n"		\
 	     "\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
@@ -9,5 +10,6 @@
 	asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n"	\
 	     "\t.globl ." #alias "\n\t.set ." #alias ", ." #name)
 #endif
+#endif
 
 #endif	/* _ASM_POWERPC_LINKAGE_H */

+ 4 - 0
arch/powerpc/include/asm/module.h

@@ -35,6 +35,7 @@ struct mod_arch_specific {
 #ifdef __powerpc64__
 	unsigned int stubs_section;	/* Index of stubs section in module */
 	unsigned int toc_section;	/* What section is the TOC? */
+	bool toc_fixed;			/* Have we fixed up .TOC.? */
 #ifdef CONFIG_DYNAMIC_FTRACE
 	unsigned long toc;
 	unsigned long tramp;
@@ -77,6 +78,9 @@ struct mod_arch_specific {
 #    endif	/* MODULE */
 #endif
 
+bool is_module_trampoline(u32 *insns);
+int module_trampoline_target(struct module *mod, u32 *trampoline,
+			     unsigned long *target);
 
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,

+ 38 - 34
arch/powerpc/include/asm/ppc_asm.h

@@ -57,7 +57,7 @@ BEGIN_FW_FTR_SECTION;							\
 	LDX_BE	r10,0,r10;		/* get log write index */	\
 	cmpd	cr1,r11,r10;						\
 	beq+	cr1,33f;						\
-	bl	.accumulate_stolen_time;				\
+	bl	accumulate_stolen_time;				\
 	ld	r12,_MSR(r1);						\
 	andi.	r10,r12,MSR_PR;		/* Restore cr0 (coming from user) */ \
 33:									\
@@ -189,57 +189,53 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define __STK_REG(i)   (112 + ((i)-14)*8)
 #define STK_REG(i)     __STK_REG(__REG_##i)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STK_GOT		24
+#define __STK_PARAM(i)	(32 + ((i)-3)*8)
+#else
+#define STK_GOT		40
 #define __STK_PARAM(i)	(48 + ((i)-3)*8)
+#endif
 #define STK_PARAM(i)	__STK_PARAM(__REG_##i)
 
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
 
 #define _GLOBAL(name) \
 	.section ".text"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
-name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
 
-#define _INIT_GLOBAL(name) \
-	__REF; \
+#define _GLOBAL_TOC(name) \
+	.section ".text"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
 name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+0:	addis r2,r12,(.TOC.-0b)@ha; \
+	addi r2,r2,(.TOC.-0b)@l; \
+	.localentry name,.-name
 
 #define _KPROBE(name) \
 	.section ".kprobes.text","a"; \
 	.align 2 ; \
+	.type name,@function; \
 	.globl name; \
-	.globl GLUE(.,name); \
-	.section ".opd","aw"; \
-name: \
-	.quad GLUE(.,name); \
-	.quad .TOC.@tocbase; \
-	.quad 0; \
-	.previous; \
-	.type GLUE(.,name),@function; \
-GLUE(.,name):
+name:
+
+#define DOTSYM(a)	a
+
+#else
+
+#define XGLUE(a,b) a##b
+#define GLUE(a,b) XGLUE(a,b)
 
-#define _STATIC(name) \
+#define _GLOBAL(name) \
 	.section ".text"; \
 	.align 2 ; \
+	.globl name; \
+	.globl GLUE(.,name); \
 	.section ".opd","aw"; \
name: \
 	.quad GLUE(.,name); \
@@ -249,9 +245,13 @@ name: \
 	.type GLUE(.,name),@function; \
 GLUE(.,name):
 
-#define _INIT_STATIC(name) \
-	__REF; \
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
+#define _KPROBE(name) \
+	.section ".kprobes.text","a"; \
 	.align 2 ; \
+	.globl name; \
+	.globl GLUE(.,name); \
 	.section ".opd","aw"; \
 name: \
 	.quad GLUE(.,name); \
@@ -261,6 +261,10 @@ name: \
 	.type GLUE(.,name),@function; \
 GLUE(.,name):
 
+#define DOTSYM(a)	GLUE(.,a)
+
+#endif
+
 #else /* 32-bit */
 
 #define _ENTRY(n)	\
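
A small standalone illustration of what the STK_GOT/__STK_PARAM change above encodes (a sketch under the assumption that only the frame-header size differs): the ELFv2 stack frame header shrinks from 48 bytes (back chain, CR, LR, compiler and linker doublewords, TOC) to 32 bytes, so the TOC save slot and the parameter save area move down.

#include <stdio.h>

/* Offsets from the ppc_asm.h hunk above. _CALL_ELF is predefined
 * by powerpc compilers; elsewhere this falls back to the ABIv1
 * values. */
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define STK_GOT		24
#define STK_PARAM(i)	(32 + ((i) - 3) * 8)
#else
#define STK_GOT		40
#define STK_PARAM(i)	(48 + ((i) - 3) * 8)
#endif

int main(void)
{
	/* Prints 24 and 32 on an ELFv2 (ppc64le) build,
	 * 40 and 48 on an ELFv1 build. */
	printf("TOC save slot: %d\n", STK_GOT);
	printf("r3 save slot:  %d\n", STK_PARAM(3));
	return 0;
}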

+ 2 - 0
arch/powerpc/include/asm/sections.h

@@ -39,6 +39,7 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
@@ -49,6 +50,7 @@ static inline void *dereference_function_descriptor(void *ptr)
 		ptr = p;
 	return ptr;
 }
+#endif
 
 #endif
 

+ 3 - 3
arch/powerpc/include/asm/systbl.h

@@ -62,7 +62,7 @@ COMPAT_SYS_SPU(fcntl)
 SYSCALL(ni_syscall)
 SYSCALL_SPU(setpgid)
 SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
+SYSX(sys_ni_syscall,sys_olduname,sys_olduname)
 SYSCALL_SPU(umask)
 SYSCALL_SPU(chroot)
 COMPAT_SYS(ustat)
@@ -258,7 +258,7 @@ SYSCALL_SPU(tgkill)
 COMPAT_SYS_SPU(utimes)
 COMPAT_SYS_SPU(statfs64)
 COMPAT_SYS_SPU(fstatfs64)
-SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
+SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
 PPC_SYS_SPU(rtas)
 OLDSYS(debug_setcontext)
 SYSCALL(ni_syscall)
@@ -295,7 +295,7 @@ SYSCALL_SPU(mkdirat)
 SYSCALL_SPU(mknodat)
 SYSCALL_SPU(fchownat)
 COMPAT_SYS_SPU(futimesat)
-SYSX_SPU(sys_newfstatat, sys_fstatat64, sys_fstatat64)
+SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)
 SYSCALL_SPU(unlinkat)
 SYSCALL_SPU(renameat)
 SYSCALL_SPU(linkat)

+ 9 - 1
arch/powerpc/include/uapi/asm/elf.h

@@ -291,9 +291,17 @@ do {									\
 #define R_PPC64_DTPREL16_HIGHERA 104 /* half16	(sym+add)@dtprel@highera */
 #define R_PPC64_DTPREL16_HIGHEST 105 /* half16	(sym+add)@dtprel@highest */
 #define R_PPC64_DTPREL16_HIGHESTA 106 /* half16	(sym+add)@dtprel@highesta */
+#define R_PPC64_TLSGD		107
+#define R_PPC64_TLSLD		108
+#define R_PPC64_TOCSAVE		109
+
+#define R_PPC64_REL16		249
+#define R_PPC64_REL16_LO	250
+#define R_PPC64_REL16_HI	251
+#define R_PPC64_REL16_HA	252
 
 /* Keep this the last entry.  */
-#define R_PPC64_NUM		107
+#define R_PPC64_NUM		253
 
 /* There's actually a third entry here, but it's unused */
 struct ppc64_opd_entry

+ 14 - 14
arch/powerpc/kernel/cpu_setup_fsl_booke.S

@@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle)
 _GLOBAL(__setup_cpu_e6500)
 	mflr	r6
 #ifdef CONFIG_PPC64
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
 #endif
 	bl	setup_pw20_idle
@@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500)
 #ifdef CONFIG_PPC_BOOK3E_64
 _GLOBAL(__restore_cpu_e6500)
 	mflr	r5
-	bl	.setup_altivec_ivors
+	bl	setup_altivec_ivors
 	/* Touch IVOR42 only if the CPU supports E.HV category */
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_lrat_ivor
+	bl	setup_lrat_ivor
 1:
-	bl	.setup_pw20_idle
-	bl	.setup_altivec_idle
+	bl	setup_pw20_idle
+	bl	setup_altivec_idle
 	bl	__restore_cpu_e5500
 	mtlr	r5
 	blr
@@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500)
 	mflr	r4
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV.  The architectural way to determine
@@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500)
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 1:
 	mtlr	r4
 	blr
@@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500)
 	mflr	r5
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
-	bl	.__setup_base_ivors
-	bl	.setup_perfmon_ivor
-	bl	.setup_doorbell_ivors
+	bl	__setup_base_ivors
+	bl	setup_perfmon_ivor
+	bl	setup_doorbell_ivors
 	/*
 	 * We only want to touch IVOR38-41 if we're running on hardware
 	 * that supports category E.HV.  The architectural way to determine
@@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500)
 	mfspr	r10,SPRN_MMUCFG
 	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
 	beq	1f
-	bl	.setup_ehv_ivors
+	bl	setup_ehv_ivors
 	b	2f
 1:
 	ld	r10,CPU_SPEC_FEATURES(r4)

+ 58 - 59
arch/powerpc/kernel/entry_64.S

@@ -39,8 +39,8 @@
  * System calls.
  */
 	.section	".toc","aw"
-.SYS_CALL_TABLE:
-	.tc .sys_call_table[TC],.sys_call_table
+SYS_CALL_TABLE:
+	.tc sys_call_table[TC],sys_call_table
 
 /* This value is used to mark exception frames on the stack. */
 exception_marker:
@@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION
 	LDX_BE	r10,0,r10		/* get log write index */
 	cmpd	cr1,r11,r10
 	beq+	cr1,33f
-	bl	.accumulate_stolen_time
+	bl	accumulate_stolen_time
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 	std	r10,SOFTE(r1)
 
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall
+	bl	do_show_syscall
 	REST_GPR(0,r1)
 	REST_4GPRS(3,r1)
 	REST_2GPRS(7,r1)
@@ -162,7 +162,7 @@ system_call:			/* label this so stack traces look sane */
  * Need to vector to 32 Bit or default sys_call_table here,
  * based on caller's run-mode / personality.
  */
-	ld	r11,.SYS_CALL_TABLE@toc(2)
+	ld	r11,SYS_CALL_TABLE@toc(2)
 	andi.	r10,r10,_TIF_32BIT
 	beq	15f
 	addi	r11,r11,8	/* use 32-bit syscall entries */
@@ -174,14 +174,14 @@ system_call:			/* label this so stack traces look sane */
 	clrldi	r8,r8,32
 15:
 	slwi	r0,r0,4
-	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
-	mtctr   r10
+	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
+	mtctr   r12
 	bctrl			/* Call handler */
 
 syscall_exit:
 	std	r3,RESULT(r1)
 #ifdef SHOW_SYSCALLS
-	bl	.do_show_syscall_exit
+	bl	do_show_syscall_exit
 	ld	r3,RESULT(r1)
 #endif
 	CURRENT_THREAD_INFO(r12, r1)
@@ -248,9 +248,9 @@ syscall_error:
 	
 /* Traced system call support */
 syscall_dotrace:
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_enter
+	bl	do_syscall_trace_enter
 	/*
 	 * Restore argument registers possibly just changed.
 	 * We use the return value of do_syscall_trace_enter
@@ -308,7 +308,7 @@ syscall_exit_work:
 4:	/* Anything else left to do? */
 	SET_DEFAULT_THREAD_PPR(r3, r10)		/* Set thread.ppr = 3 */
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
-	beq	.ret_from_except_lite
+	beq	ret_from_except_lite
 
 	/* Re-enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
@@ -319,10 +319,10 @@ syscall_exit_work:
 	mtmsrd	r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_syscall_trace_leave
-	b	.ret_from_except
+	bl	do_syscall_trace_leave
+	b	ret_from_except
 
 /* Save non-volatile GPRs, if not already saved. */
 _GLOBAL(save_nvgprs)
@@ -345,42 +345,44 @@ _GLOBAL(save_nvgprs)
  */
 
 _GLOBAL(ppc_fork)
-	bl	.save_nvgprs
-	bl	.sys_fork
+	bl	save_nvgprs
+	bl	sys_fork
 	b	syscall_exit
 
 _GLOBAL(ppc_vfork)
-	bl	.save_nvgprs
-	bl	.sys_vfork
+	bl	save_nvgprs
+	bl	sys_vfork
 	b	syscall_exit
 
 _GLOBAL(ppc_clone)
-	bl	.save_nvgprs
-	bl	.sys_clone
+	bl	save_nvgprs
+	bl	sys_clone
 	b	syscall_exit
 
 _GLOBAL(ppc32_swapcontext)
-	bl	.save_nvgprs
-	bl	.compat_sys_swapcontext
+	bl	save_nvgprs
+	bl	compat_sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ppc64_swapcontext)
-	bl	.save_nvgprs
-	bl	.sys_swapcontext
+	bl	save_nvgprs
+	bl	sys_swapcontext
 	b	syscall_exit
 
 _GLOBAL(ret_from_fork)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
 	li	r3,0
 	b	syscall_exit
 
 _GLOBAL(ret_from_kernel_thread)
-	bl	.schedule_tail
+	bl	schedule_tail
 	REST_NVGPRS(r1)
-	ld	r14, 0(r14)
 	mtlr	r14
 	mr	r3,r15
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+	mr	r12,r14
+#endif
 	blrl
 	li	r3,0
 	b	syscall_exit
@@ -611,7 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 _GLOBAL(ret_from_except)
 	ld	r11,_TRAP(r1)
 	andi.	r0,r11,1
-	bne	.ret_from_except_lite
+	bne	ret_from_except_lite
 	REST_NVGPRS(r1)
 
 _GLOBAL(ret_from_except_lite)
@@ -661,23 +663,23 @@ _GLOBAL(ret_from_except_lite)
 #endif
 1:	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	2f
-	bl	.restore_interrupts
+	bl	restore_interrupts
 	SCHEDULE_USER
-	b	.ret_from_except_lite
+	b	ret_from_except_lite
 2:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
 	bne	3f		/* only restore TM if nothing else to do */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.restore_tm_state
+	bl	restore_tm_state
 	b	restore
 3:
 #endif
-	bl	.save_nvgprs
-	bl	.restore_interrupts
+	bl	save_nvgprs
+	bl	restore_interrupts
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_notify_resume
-	b	.ret_from_except
+	bl	do_notify_resume
+	b	ret_from_except
 
 resume_kernel:
 	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
@@ -730,7 +732,7 @@ resume_kernel:
 	 * sure we are soft-disabled first and reconcile irq state.
 	 */
 	RECONCILE_IRQ_STATE(r3,r4)
-1:	bl	.preempt_schedule_irq
+1:	bl	preempt_schedule_irq
 
 	/* Re-test flags and eventually loop */
 	CURRENT_THREAD_INFO(r9, r1)
@@ -792,7 +794,7 @@ restore_no_replay:
 	 */
 do_restore:
 #ifdef CONFIG_PPC_BOOK3E
-	b	.exception_return_book3e
+	b	exception_return_book3e
 #else
 	/*
 	 * Clear the reservation. If we know the CPU tracks the address of
@@ -907,7 +909,7 @@ restore_check_irq_replay:
 	 *
 	 * Still, this might be useful for things like hash_page
 	 */
-	bl	.__check_irq_replay
+	bl	__check_irq_replay
 	cmpwi	cr0,r3,0
  	beq	restore_no_replay
 
@@ -928,13 +930,13 @@ restore_check_irq_replay:
 	cmpwi	cr0,r3,0x500
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
- 	bl	.do_IRQ
-	b	.ret_from_except
+ 	bl	do_IRQ
+	b	ret_from_except
 1:	cmpwi	cr0,r3,0x900
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.timer_interrupt
-	b	.ret_from_except
+	bl	timer_interrupt
+	b	ret_from_except
 #ifdef CONFIG_PPC_DOORBELL
 1:
 #ifdef CONFIG_PPC_BOOK3E
@@ -948,14 +950,14 @@ restore_check_irq_replay:
 #endif /* CONFIG_PPC_BOOK3E */
 	bne	1f
 	addi	r3,r1,STACK_FRAME_OVERHEAD;
-	bl	.doorbell_exception
-	b	.ret_from_except
+	bl	doorbell_exception
+	b	ret_from_except
 #endif /* CONFIG_PPC_DOORBELL */
-1:	b	.ret_from_except /* What else to do here ? */
+1:	b	ret_from_except /* What else to do here ? */
 
 unrecov_restore:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	unrecov_restore
 
 #ifdef CONFIG_PPC_RTAS
@@ -1021,7 +1023,7 @@ _GLOBAL(enter_rtas)
         std	r6,PACASAVEDMSR(r13)
 
 	/* Setup our real return addr */	
-	LOAD_REG_ADDR(r4,.rtas_return_loc)
+	LOAD_REG_ADDR(r4,rtas_return_loc)
 	clrldi	r4,r4,2			/* convert to realmode address */
        	mtlr	r4
 
@@ -1045,7 +1047,7 @@ _GLOBAL(enter_rtas)
 	rfid
 	b	.	/* prevent speculative execution */
 
-_STATIC(rtas_return_loc)
+rtas_return_loc:
 	FIXUP_ENDIAN
 
 	/* relocation is off at this point */
@@ -1054,7 +1056,7 @@ _STATIC(rtas_return_loc)
 
 	bcl	20,31,$+4
 0:	mflr	r3
-	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */
+	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */
 
 	mfmsr   r6
 	li	r0,MSR_RI
@@ -1071,9 +1073,9 @@ _STATIC(rtas_return_loc)
 	b	.	/* prevent speculative execution */
 
 	.align	3
-1:	.llong	.rtas_restore_regs
+1:	.llong	rtas_restore_regs
 
-_STATIC(rtas_restore_regs)
+rtas_restore_regs:
 	/* relocation is on at this point */
 	REST_GPR(2, r1)			/* Restore the TOC */
 	REST_GPR(13, r1)		/* Restore paca */
@@ -1173,7 +1175,7 @@ _GLOBAL(mcount)
 _GLOBAL(_mcount)
 	blr
 
-_GLOBAL(ftrace_caller)
+_GLOBAL_TOC(ftrace_caller)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
 	ld	r11, 0(r1)
@@ -1197,10 +1199,7 @@ _GLOBAL(ftrace_graph_stub)
 _GLOBAL(ftrace_stub)
 	blr
 #else
-_GLOBAL(mcount)
-	blr
-
-_GLOBAL(_mcount)
+_GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
 	ld	r11, 0(r1)
@@ -1238,7 +1237,7 @@ _GLOBAL(ftrace_graph_caller)
 	ld	r11, 112(r1)
 	addi	r3, r11, 16
 
-	bl	.prepare_ftrace_return
+	bl	prepare_ftrace_return
 	nop
 
 	ld	r0, 128(r1)
@@ -1254,7 +1253,7 @@ _GLOBAL(return_to_handler)
 	mr	r31, r1
 	stdu	r1, -112(r1)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
@@ -1284,7 +1283,7 @@ _GLOBAL(mod_return_to_handler)
 	 */
 	ld	r2, PACATOC(r13)
 
-	bl	.ftrace_return_to_handler
+	bl	ftrace_return_to_handler
 	nop
 
 	/* return value has real return address */
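
Context for the ldx r12/mtctr r12 and mr r12,r14 changes above (a hedged C illustration of the calling convention, not kernel code): ABIv2 requires an indirect caller to pass the target address in r12, because the callee's global entry point rebuilds its TOC pointer from r12 using the addis/addi pair shown in the ppc_asm.h hunk.

typedef long (*handler_fn)(long, long, long);

/* For the call through "fn", an ELFv2 compiler emits roughly:
 *	mr	r12,<fn>
 *	mtctr	r12
 *	bctrl		; callee derives r2 from r12
 * which is why the syscall dispatch now loads the handler into
 * r12 before mtctr, and ret_from_kernel_thread copies the thread
 * function into r12 before blrl. */
long call_indirect(handler_fn fn, long a, long b, long c)
{
	return fn(a, b, c);
}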

+ 70 - 70
arch/powerpc/kernel/exceptions-64e.S

@@ -499,7 +499,7 @@ exc_##n##_bad_stack:							    \
 	CHECK_NAPPING();						\
 	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
 	bl	hdlr;							\
-	b	.ret_from_except_lite;
+	b	ret_from_except_lite;
 
 /* This value is used to mark exception frames on the stack. */
 	.section	".toc","aw"
@@ -550,11 +550,11 @@ interrupt_end_book3e:
 	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x100)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Machine Check Interrupt */
@@ -562,11 +562,11 @@ interrupt_end_book3e:
 	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 			    PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_MC(0x000)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
+	bl	machine_check_exception
 	b	ret_from_mc_except
 
 /* Data Storage Interrupt */
@@ -591,7 +591,7 @@ interrupt_end_book3e:
 
 /* External Input Interrupt */
 	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
-			   external_input, .do_IRQ, ACK_NONE)
+			   external_input, do_IRQ, ACK_NONE)
 
 /* Alignment */
 	START_EXCEPTION(alignment);
@@ -612,9 +612,9 @@ interrupt_end_book3e:
 	std	r14,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
-	bl	.save_nvgprs
-	bl	.program_check_exception
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	program_check_exception
+	b	ret_from_except
 
 /* Floating Point Unavailable Interrupt */
 	START_EXCEPTION(fp_unavailable);
@@ -625,13 +625,13 @@ interrupt_end_book3e:
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_fpu
+	bl	load_up_fpu
 	b	fast_exception_return
 1:	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_fp_unavailable_exception
-	b	.ret_from_except
+	bl	kernel_fp_unavailable_exception
+	b	ret_from_except
 
 /* Altivec Unavailable Interrupt */
 	START_EXCEPTION(altivec_unavailable);
@@ -644,16 +644,16 @@ BEGIN_FTR_SECTION
 	ld	r12,_MSR(r1)
 	andi.	r0,r12,MSR_PR;
 	beq-	1f
-	bl	.load_up_altivec
+	bl	load_up_altivec
 	b	fast_exception_return
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
+	bl	altivec_unavailable_exception
+	b	ret_from_except
 
 /* AltiVec Assist */
 	START_EXCEPTION(altivec_assist);
@@ -662,39 +662,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 				PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x220)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	bl	.altivec_assist_exception
+	bl	altivec_assist_exception
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
-	b	.ret_from_except
+	b	ret_from_except
 
 
 /* Decrementer Interrupt */
 	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
-			   decrementer, .timer_interrupt, ACK_DEC)
+			   decrementer, timer_interrupt, ACK_DEC)
 
 /* Fixed Interval Timer Interrupt */
 	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
-			   fixed_interval, .unknown_exception, ACK_FIT)
+			   fixed_interval, unknown_exception, ACK_FIT)
 
 /* Watchdog Timer Interrupt */
 	START_EXCEPTION(watchdog);
 	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x9f0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 #ifdef CONFIG_BOOKE_WDT
-	bl	.WatchdogException
+	bl	WatchdogException
 #else
-	bl	.unknown_exception
+	bl	unknown_exception
 #endif
 	b	ret_from_crit_except
 
@@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 				PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0xf20)
 	INTS_DISABLE
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Debug exception as a critical interrupt*/
 	START_EXCEPTION(debug_crit);
@@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mr	r4,r14
 	ld	r14,PACA_EXCRIT+EX_R14(r13)
 	ld	r15,PACA_EXCRIT+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 kernel_dbg_exc:
 	b	.	/* NYI */
@@ -839,9 +839,9 @@ kernel_dbg_exc:
 	mr	r4,r14
 	ld	r14,PACA_EXDBG+EX_R14(r13)
 	ld	r15,PACA_EXDBG+EX_R15(r13)
-	bl	.save_nvgprs
-	bl	.DebugException
-	b	.ret_from_except
+	bl	save_nvgprs
+	bl	DebugException
+	b	ret_from_except
 
 	START_EXCEPTION(perfmon);
 	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
@@ -850,23 +850,23 @@ kernel_dbg_exc:
 	INTS_DISABLE
 	CHECK_NAPPING()
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.performance_monitor_exception
-	b	.ret_from_except_lite
+	bl	performance_monitor_exception
+	b	ret_from_except_lite
 
 /* Doorbell interrupt */
 	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
-			   doorbell, .doorbell_exception, ACK_NONE)
+			   doorbell, doorbell_exception, ACK_NONE)
 
 /* Doorbell critical Interrupt */
 	START_EXCEPTION(doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2a0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /*
@@ -878,21 +878,21 @@ kernel_dbg_exc:
 			        PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x2c0)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Guest Doorbell critical Interrupt */
 	START_EXCEPTION(guest_doorbell_crit);
 	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
 			      PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON_CRIT(0x2e0)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	bl	special_reg_save
 	CHECK_NAPPING();
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unknown_exception
+	bl	unknown_exception
 	b	ret_from_crit_except
 
 /* Hypervisor call */
@@ -901,10 +901,10 @@ kernel_dbg_exc:
 			        PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x310)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* Embedded Hypervisor priviledged  */
 	START_EXCEPTION(ehpriv);
@@ -912,10 +912,10 @@ kernel_dbg_exc:
 			        PROLOG_ADDITION_NONE)
 	EXCEPTION_COMMON(0x320)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.unknown_exception
-	b	.ret_from_except
+	bl	unknown_exception
+	b	ret_from_except
 
 /* LRAT Error interrupt */
 	START_EXCEPTION(lrat_error);
@@ -1014,16 +1014,16 @@ storage_fault_common:
 	mr	r5,r15
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.do_page_fault
+	bl	do_page_fault
 	cmpdi	r3,0
 	bne-	1f
-	b	.ret_from_except_lite
-1:	bl	.save_nvgprs
+	b	ret_from_except_lite
+1:	bl	save_nvgprs
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
@@ -1035,10 +1035,10 @@ alignment_more:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	INTS_RESTORE_HARD
-	bl	.alignment_exception
-	b	.ret_from_except
+	bl	alignment_exception
+	b	ret_from_except
 
 /*
 * We branch here from entry_64.S for the last stage of the exception
@@ -1172,7 +1172,7 @@ bad_stack_book3e:
 	std	r12,0(r11)
 	ld	r2,PACATOC(r13)
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
+	bl	kernel_bad_stack
 	b	1b
 
 /*
@@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e)
 	 * and always use AS 0, so we just set it up to match our link
 	 * address and never use 0 based addresses.
 	 */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e
 
 	/* Init global core bits */
-	bl	.init_core_book3e
+	bl	init_core_book3e
 
 	/* Init per-thread bits */
-	bl	.init_thread_book3e
+	bl	init_thread_book3e
 
 	/* Return to common init code */
 	tovirt(r28,r28)
@@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e)
  */
 _GLOBAL(book3e_secondary_core_init_tlb_set)
 	li	r4,1
-	b	.generic_secondary_smp_init
+	b	generic_secondary_smp_init
 
 _GLOBAL(book3e_secondary_core_init)
 	mflr	r28
@@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init)
 	bne	2f
 
 	/* Setup TLB for this core */
-	bl	.initial_tlb_book3e
+	bl	initial_tlb_book3e
 
 	/* We can return from the above running at a different
 	 * address, so recalculate r2 (TOC)
 	 */
-	bl	.relative_toc
+	bl	relative_toc
 
 	/* Init global core bits */
-2:	bl	.init_core_book3e
+2:	bl	init_core_book3e
 
 	/* Init per-thread bits */
-3:	bl	.init_thread_book3e
+3:	bl	init_thread_book3e
 
 	/* Return to common init code at proper virtual address.
 	 *
@@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init)
 	mflr	r28
 	b	3b
 
-_STATIC(init_core_book3e)
+init_core_book3e:
 	/* Establish the interrupt vector base */
 	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
 	mtspr	SPRN_IVPR,r3
 	sync
 	blr
 
-_STATIC(init_thread_book3e)
+init_thread_book3e:
 	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
 	mtspr	SPRN_EPCR,r3
 

+ 103 - 103
arch/powerpc/kernel/exceptions-64s.S

@@ -132,12 +132,12 @@ BEGIN_FTR_SECTION
 #endif
 
 	beq	cr1,2f
-	b	.power7_wakeup_noloss
-2:	b	.power7_wakeup_loss
+	b	power7_wakeup_noloss
+2:	b	power7_wakeup_loss
 
 	/* Fast Sleep wakeup on PowerNV */
 8:	GET_PACA(r13)
-	b 	.power7_wakeup_tb_loss
+	b 	power7_wakeup_tb_loss
 
 9:
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
@@ -211,16 +211,16 @@ data_access_slb_pSeries:
 #endif /* __DISABLED__ */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	/*
-	 * We can't just use a direct branch to .slb_miss_realmode
+	 * We can't just use a direct branch to slb_miss_realmode
 	 * because the distance from here to there depends on where
 	 * the kernel ends up being put.
 	 */
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
+	LOAD_HANDLER(r10, slb_miss_realmode)
 	mtctr	r10
 	bctr
 #endif
@@ -243,11 +243,11 @@ instruction_access_slb_pSeries:
 #endif /* __DISABLED__ */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
+	LOAD_HANDLER(r10, slb_miss_realmode)
 	mtctr	r10
 	bctr
 #endif
@@ -524,7 +524,7 @@ do_stab_bolted_pSeries:
 	std	r12,PACA_EXSLB+EX_R12(r13)
 	GET_SCRATCH0(r10)
 	std	r10,PACA_EXSLB+EX_R13(r13)
-	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
+	EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD)
 
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -769,38 +769,38 @@ kvmppc_skip_Hinterrupt:
 
 /*** Common interrupt handlers ***/
 
-	STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
 
 	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
-	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
-	STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
+	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
+	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
 #ifdef CONFIG_PPC_DOORBELL
-	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception)
+	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
 #else
-	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception)
+	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
 #endif
-	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
-	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
-	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
+	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
+	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
+	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
+	STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception)
 #ifdef CONFIG_PPC_DOORBELL
-	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
+	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
 #else
-	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception)
+	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
 #endif
-	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
-	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
-	STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
+	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
+	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
+	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
 #ifdef CONFIG_ALTIVEC
-	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
 #else
-	STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
 #endif
 #ifdef CONFIG_CBE_RAS
-	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
-	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
-	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
+	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
+	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
 	/*
@@ -829,16 +829,16 @@ data_access_slb_relon_pSeries:
 	mfspr	r3,SPRN_DAR
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	/*
-	 * We can't just use a direct branch to .slb_miss_realmode
+	 * We can't just use a direct branch to slb_miss_realmode
 	 * because the distance from here to there depends on where
 	 * the kernel ends up being put.
 	 */
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
+	LOAD_HANDLER(r10, slb_miss_realmode)
 	mtctr	r10
 	bctr
 #endif
@@ -854,11 +854,11 @@ instruction_access_slb_relon_pSeries:
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 	mfspr	r12,SPRN_SRR1
 #ifndef CONFIG_RELOCATABLE
-	b	.slb_miss_realmode
+	b	slb_miss_realmode
 #else
 	mfctr	r11
 	ld	r10,PACAKBASE(r13)
-	LOAD_HANDLER(r10, .slb_miss_realmode)
+	LOAD_HANDLER(r10, slb_miss_realmode)
 	mtctr	r10
 	bctr
 #endif
@@ -966,7 +966,7 @@ system_call_entry:
 	b	system_call_common
 
 ppc64_runlatch_on_trampoline:
-	b	.__ppc64_runlatch_on
+	b	__ppc64_runlatch_on
 
 /*
 * Here we have detected that the kernel stack pointer is bad.
@@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	std	r12,RESULT(r1)
 	std	r11,STACK_FRAME_OVERHEAD-16(r1)
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_bad_stack
+	bl	kernel_bad_stack
 	b	1b
 
 /*
@@ -1046,7 +1046,7 @@ data_access_common:
 	ld	r3,PACA_EXGEN+EX_DAR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	li	r5,0x300
-	b	.do_hash_page		/* Try to handle as hpte fault */
+	b	do_hash_page		/* Try to handle as hpte fault */
 
 	.align  7
 	.globl  h_data_storage_common
@@ -1056,11 +1056,11 @@ h_data_storage_common:
 	mfspr   r10,SPRN_HDSISR
 	stw     r10,PACA_EXGEN+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
-	bl      .save_nvgprs
+	bl      save_nvgprs
 	DISABLE_INTS
 	addi    r3,r1,STACK_FRAME_OVERHEAD
-	bl      .unknown_exception
-	b       .ret_from_except
+	bl      unknown_exception
+	b       ret_from_except
 
 	.align	7
 	.globl instruction_access_common
@@ -1071,9 +1071,9 @@ instruction_access_common:
 	ld	r3,_NIP(r1)
 	andis.	r4,r12,0x5820
 	li	r5,0x400
-	b	.do_hash_page		/* Try to handle as hpte fault */
+	b	do_hash_page		/* Try to handle as hpte fault */
 
-	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
+	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
 
 /*
  * Here is the common SLB miss user that is used when going to virtual
@@ -1088,7 +1088,7 @@ slb_miss_user_common:
 	stw	r9,PACA_EXGEN+EX_CCR(r13)
 	stw	r9,PACA_EXGEN+EX_CCR(r13)
 	std	r10,PACA_EXGEN+EX_LR(r13)
 	std	r10,PACA_EXGEN+EX_LR(r13)
 	std	r11,PACA_EXGEN+EX_SRR0(r13)
 	std	r11,PACA_EXGEN+EX_SRR0(r13)
-	bl	.slb_allocate_user
+	bl	slb_allocate_user
 
 
 	ld	r10,PACA_EXGEN+EX_LR(r13)
 	ld	r10,PACA_EXGEN+EX_LR(r13)
 	ld	r3,PACA_EXGEN+EX_R3(r13)
 	ld	r3,PACA_EXGEN+EX_R3(r13)
@@ -1131,9 +1131,9 @@ slb_miss_fault:
 unrecov_user_slb:
 unrecov_user_slb:
 	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
 	DISABLE_INTS
 	DISABLE_INTS
-	bl	.save_nvgprs
+	bl	save_nvgprs
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 	b	1b
 
 
 #endif /* __DISABLED__ */
 #endif /* __DISABLED__ */
@@ -1158,10 +1158,10 @@ machine_check_common:
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	std	r3,_DAR(r1)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	std	r4,_DSISR(r1)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_exception
-	b	.ret_from_except
+	bl	machine_check_exception
+	b	ret_from_except
 
 
 	.align	7
 	.align	7
 	.globl alignment_common
 	.globl alignment_common
@@ -1175,31 +1175,31 @@ alignment_common:
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
 	std	r3,_DAR(r1)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	std	r4,_DSISR(r1)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.alignment_exception
-	b	.ret_from_except
+	bl	alignment_exception
+	b	ret_from_except
 
 
 	.align	7
 	.align	7
 	.globl program_check_common
 	.globl program_check_common
 program_check_common:
 program_check_common:
 	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.program_check_exception
-	b	.ret_from_except
+	bl	program_check_exception
+	b	ret_from_except
 
 
 	.align	7
 	.align	7
 	.globl fp_unavailable_common
 	.globl fp_unavailable_common
 fp_unavailable_common:
 fp_unavailable_common:
 	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
 	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
 	bne	1f			/* if from user, just load it up */
 	bne	1f			/* if from user, just load it up */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.kernel_fp_unavailable_exception
+	bl	kernel_fp_unavailable_exception
 	BUG_OPCODE
 	BUG_OPCODE
 1:
 1:
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION
 	bne-	2f
 	bne-	2f
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
 #endif
-	bl	.load_up_fpu
+	bl	load_up_fpu
 	b	fast_exception_return
 	b	fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.fp_unavailable_tm
-	b	.ret_from_except
+	bl	fp_unavailable_tm
+	b	ret_from_except
 #endif
 #endif
 	.align	7
 	.align	7
 	.globl altivec_unavailable_common
 	.globl altivec_unavailable_common
@@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION
 	bne-	2f
 	bne-	2f
   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
 #endif
-	bl	.load_up_altivec
+	bl	load_up_altivec
 	b	fast_exception_return
 	b	fast_exception_return
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_tm
-	b	.ret_from_except
+	bl	altivec_unavailable_tm
+	b	ret_from_except
 #endif
 #endif
 1:
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 #endif
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.altivec_unavailable_exception
-	b	.ret_from_except
+	bl	altivec_unavailable_exception
+	b	ret_from_except
 
 
 	.align	7
 	.align	7
 	.globl vsx_unavailable_common
 	.globl vsx_unavailable_common
@@ -1272,26 +1272,26 @@ BEGIN_FTR_SECTION
 	bne-	2f
 	bne-	2f
   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
   END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
 #endif
 #endif
-	b	.load_up_vsx
+	b	load_up_vsx
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 2:	/* User process was in a transaction */
 2:	/* User process was in a transaction */
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.vsx_unavailable_tm
-	b	.ret_from_except
+	bl	vsx_unavailable_tm
+	b	ret_from_except
 #endif
 #endif
 1:
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 #endif
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	DISABLE_INTS
 	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.vsx_unavailable_exception
-	b	.ret_from_except
+	bl	vsx_unavailable_exception
+	b	ret_from_except
 
 
-	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
-	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
+	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
+	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
 
 
 	.align	7
 	.align	7
 	.globl	__end_handlers
 	.globl	__end_handlers
@@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler)
 machine_check_handle_early:
 machine_check_handle_early:
 	std	r0,GPR0(r1)	/* Save r0 */
 	std	r0,GPR0(r1)	/* Save r0 */
 	EXCEPTION_PROLOG_COMMON_3(0x200)
 	EXCEPTION_PROLOG_COMMON_3(0x200)
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.machine_check_early
+	bl	machine_check_early
 	ld	r12,_MSR(r1)
 	ld	r12,_MSR(r1)
 #ifdef	CONFIG_PPC_P7_NAP
 #ifdef	CONFIG_PPC_P7_NAP
 	/*
 	/*
@@ -1408,11 +1408,11 @@ machine_check_handle_early:
 	/* Supervisor state loss */
 	/* Supervisor state loss */
 	li	r0,1
 	li	r0,1
 	stb	r0,PACA_NAPSTATELOST(r13)
 	stb	r0,PACA_NAPSTATELOST(r13)
-3:	bl	.machine_check_queue_event
+3:	bl	machine_check_queue_event
 	MACHINE_CHECK_HANDLER_WINDUP
 	MACHINE_CHECK_HANDLER_WINDUP
 	GET_PACA(r13)
 	GET_PACA(r13)
 	ld	r1,PACAR1(r13)
 	ld	r1,PACAR1(r13)
-	b	.power7_enter_nap_mode
+	b	power7_enter_nap_mode
 4:
 4:
 #endif
 #endif
 	/*
 	/*
@@ -1444,7 +1444,7 @@ machine_check_handle_early:
 	andi.	r11,r12,MSR_RI
 	andi.	r11,r12,MSR_RI
 	bne	2f
 	bne	2f
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 	b	1b
 2:
 2:
 	/*
 	/*
@@ -1452,7 +1452,7 @@ machine_check_handle_early:
 	 * Queue up the MCE event so that we can log it later, while
 	 * Queue up the MCE event so that we can log it later, while
 	 * returning from kernel or opal call.
 	 * returning from kernel or opal call.
 	 */
 	 */
-	bl	.machine_check_queue_event
+	bl	machine_check_queue_event
 	MACHINE_CHECK_HANDLER_WINDUP
 	MACHINE_CHECK_HANDLER_WINDUP
 	rfid
 	rfid
 9:
 9:
@@ -1468,7 +1468,7 @@ machine_check_handle_early:
  * r3 is saved in paca->slb_r3
  * r3 is saved in paca->slb_r3
  * We assume we aren't going to take any exceptions during this procedure.
  * We assume we aren't going to take any exceptions during this procedure.
  */
  */
-_GLOBAL(slb_miss_realmode)
+slb_miss_realmode:
 	mflr	r10
 	mflr	r10
 #ifdef CONFIG_RELOCATABLE
 #ifdef CONFIG_RELOCATABLE
 	mtctr	r11
 	mtctr	r11
@@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode)
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
 
 
-	bl	.slb_allocate_realmode
+	bl	slb_allocate_realmode
 
 
 	/* All done -- return from exception. */
 	/* All done -- return from exception. */
 
 
@@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode)
 unrecov_slb:
 unrecov_slb:
 	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
 	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
 	DISABLE_INTS
 	DISABLE_INTS
-	bl	.save_nvgprs
+	bl	save_nvgprs
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
 1:	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.unrecoverable_exception
+	bl	unrecoverable_exception
 	b	1b
 	b	1b
 
 
 
 
@@ -1536,7 +1536,7 @@ power4_fixup_nap:
  * Hash table stuff
  * Hash table stuff
  */
  */
 	.align	7
 	.align	7
-_STATIC(do_hash_page)
+do_hash_page:
 	std	r3,_DAR(r1)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	std	r4,_DSISR(r1)
 
 
@@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 	 *
 	 *
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 * at return r3 = 0 for success, 1 for page fault, negative for error
 	 */
 	 */
-	bl	.hash_page		/* build HPTE if possible */
+	bl	hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
 
 	/* Success */
 	/* Success */
@@ -1587,35 +1587,35 @@ handle_page_fault:
 11:	ld	r4,_DAR(r1)
 11:	ld	r4,_DAR(r1)
 	ld	r5,_DSISR(r1)
 	ld	r5,_DSISR(r1)
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_page_fault
+	bl	do_page_fault
 	cmpdi	r3,0
 	cmpdi	r3,0
 	beq+	12f
 	beq+	12f
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	mr	r5,r3
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	lwz	r4,_DAR(r1)
 	lwz	r4,_DAR(r1)
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 
 /* We have a data breakpoint exception - handle it */
 /* We have a data breakpoint exception - handle it */
 handle_dabr_fault:
 handle_dabr_fault:
-	bl	.save_nvgprs
+	bl	save_nvgprs
 	ld      r4,_DAR(r1)
 	ld      r4,_DAR(r1)
 	ld      r5,_DSISR(r1)
 	ld      r5,_DSISR(r1)
 	addi    r3,r1,STACK_FRAME_OVERHEAD
 	addi    r3,r1,STACK_FRAME_OVERHEAD
-	bl      .do_break
-12:	b       .ret_from_except_lite
+	bl      do_break
+12:	b       ret_from_except_lite
 
 
 
 
 /* We have a page fault that hash_page could handle but HV refused
 /* We have a page fault that hash_page could handle but HV refused
  * the PTE insertion
  * the PTE insertion
  */
  */
-13:	bl	.save_nvgprs
+13:	bl	save_nvgprs
 	mr	r5,r3
 	mr	r5,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r4,_DAR(r1)
 	ld	r4,_DAR(r1)
-	bl	.low_hash_fault
-	b	.ret_from_except
+	bl	low_hash_fault
+	b	ret_from_except
 
 
 /*
 /*
  * We come here as a result of a DSI at a point where we don't want
  * We come here as a result of a DSI at a point where we don't want
@@ -1624,16 +1624,16 @@ handle_dabr_fault:
  * were soft-disabled.  We want to invoke the exception handler for
  * were soft-disabled.  We want to invoke the exception handler for
  * the access, or panic if there isn't a handler.
  * the access, or panic if there isn't a handler.
  */
  */
-77:	bl	.save_nvgprs
+77:	bl	save_nvgprs
 	mr	r4,r3
 	mr	r4,r3
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	li	r5,SIGSEGV
 	li	r5,SIGSEGV
-	bl	.bad_page_fault
-	b	.ret_from_except
+	bl	bad_page_fault
+	b	ret_from_except
 
 
 	/* here we have a segment miss */
 	/* here we have a segment miss */
 do_ste_alloc:
 do_ste_alloc:
-	bl	.ste_allocate		/* try to insert stab entry */
+	bl	ste_allocate		/* try to insert stab entry */
 	cmpdi	r3,0
 	cmpdi	r3,0
 	bne-	handle_page_fault
 	bne-	handle_page_fault
 	b	fast_exception_return
 	b	fast_exception_return
@@ -1646,7 +1646,7 @@ do_ste_alloc:
  * We assume (DAR >> 60) == 0xc.
  * We assume (DAR >> 60) == 0xc.
  */
  */
 	.align	7
 	.align	7
-_GLOBAL(do_stab_bolted)
+do_stab_bolted:
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
 	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
 	std	r11,PACA_EXSLB+EX_SRR0(r13)	/* save SRR0 in exc. frame */
 	mfspr	r11,SPRN_DAR			/* ea */
 	mfspr	r11,SPRN_DAR			/* ea */
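Why the dots can go: under ELFv1 every function "foo" is a descriptor in .opd while ".foo" names the actual code, so low-level branches had to target ".foo"; ELFv2 drops the descriptors and "foo" is the entry point itself. A minimal C sketch of the descriptor shape (illustrative names and values, not taken from the patch):

#include <stdio.h>

/* Illustrative ELFv1 function descriptor: "foo" names this triple,
 * ".foo" names the code it points at. ELFv2 has no such descriptor,
 * so the handlers above can branch to plain "foo" directly. */
struct fdesc {
	unsigned long funcaddr;	/* entry point, the old ".foo" */
	unsigned long toc;	/* r2 (TOC) value for the callee */
	unsigned long env;	/* environment pointer, unused by C */
};

int main(void)
{
	struct fdesc foo = { 0x1234, 0x8000, 0 };	/* made-up values */

	/* ELFv1 indirect call: fetch the code address from the
	 * descriptor. ELFv2 would use the pointer value as-is. */
	printf("branch target: %#lx, callee r2: %#lx\n",
	       foo.funcaddr, foo.toc);
	return 0;
}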

+ 39 - 98
arch/powerpc/kernel/ftrace.c

@@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod,
 		  struct dyn_ftrace *rec, unsigned long addr)
 		  struct dyn_ftrace *rec, unsigned long addr)
 {
 {
 	unsigned int op;
 	unsigned int op;
-	unsigned int jmp[5];
 	unsigned long ptr;
 	unsigned long ptr;
 	unsigned long ip = rec->ip;
 	unsigned long ip = rec->ip;
-	unsigned long tramp;
-	int offset;
+	void *tramp;
 
 
 	/* read where this goes */
 	/* read where this goes */
 	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
 	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
@@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod,
 	}
 	}
 
 
 	/* lets find where the pointer goes */
 	/* lets find where the pointer goes */
-	tramp = find_bl_target(ip, op);
-
-	/*
-	 * On PPC64 the trampoline looks like:
-	 * 0x3d, 0x82, 0x00, 0x00,    addis   r12,r2, <high>
-	 * 0x39, 0x8c, 0x00, 0x00,    addi    r12,r12, <low>
-	 *   Where the bytes 2,3,6 and 7 make up the 32bit offset
-	 *   to the TOC that holds the pointer.
-	 *   to jump to.
-	 * 0xf8, 0x41, 0x00, 0x28,    std     r2,40(r1)
-	 * 0xe9, 0x6c, 0x00, 0x20,    ld      r11,32(r12)
-	 *   The actually address is 32 bytes from the offset
-	 *   into the TOC.
-	 * 0xe8, 0x4c, 0x00, 0x28,    ld      r2,40(r12)
-	 */
-
-	pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
-
-	/* Find where the trampoline jumps to */
-	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
-		printk(KERN_ERR "Failed to read %lx\n", tramp);
-		return -EFAULT;
-	}
+	tramp = (void *)find_bl_target(ip, op);
 
 
-	pr_devel(" %08x %08x", jmp[0], jmp[1]);
+	pr_devel("ip:%lx jumps to %p", ip, tramp);
 
 
-	/* verify that this is what we expect it to be */
-	if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
-	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
-	    (jmp[2] != 0xf8410028) ||
-	    (jmp[3] != 0xe96c0020) ||
-	    (jmp[4] != 0xe84c0028)) {
+	if (!is_module_trampoline(tramp)) {
 		printk(KERN_ERR "Not a trampoline\n");
 		printk(KERN_ERR "Not a trampoline\n");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	/* The bottom half is signed extended */
-	offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
-		(int)((short)jmp[1]);
-
-	pr_devel(" %x ", offset);
-
-	/* get the address this jumps too */
-	tramp = mod->arch.toc + offset + 32;
-	pr_devel("toc: %lx", tramp);
-
-	if (probe_kernel_read(jmp, (void *)tramp, 8)) {
-		printk(KERN_ERR "Failed to read %lx\n", tramp);
+	if (module_trampoline_target(mod, tramp, &ptr)) {
+		printk(KERN_ERR "Failed to get trampoline target\n");
 		return -EFAULT;
 		return -EFAULT;
 	}
 	}
 
 
-	pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
-
-#ifdef __LITTLE_ENDIAN__
-	ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
-#else
-	ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
-#endif
+	pr_devel("trampoline target %lx", ptr);
 
 
 	/* This should match what was called */
 	/* This should match what was called */
 	if (ptr != ppc_function_entry((void *)addr)) {
 	if (ptr != ppc_function_entry((void *)addr)) {
-		printk(KERN_ERR "addr does not match %lx\n", ptr);
+		printk(KERN_ERR "addr %lx does not match expected %lx\n",
+			ptr, ppc_function_entry((void *)addr));
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
 	/*
 	/*
-	 * We want to nop the line, but the next line is
-	 *  0xe8, 0x41, 0x00, 0x28   ld r2,40(r1)
-	 * This needs to be turned to a nop too.
-	 */
-	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
-		return -EFAULT;
-
-	if (op != 0xe8410028) {
-		printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
-		return -EINVAL;
-	}
-
-	/*
-	 * Milton Miller pointed out that we can not blindly do nops.
-	 * If a task was preempted when calling a trace function,
-	 * the nops will remove the way to restore the TOC in r2
-	 * and the r2 TOC will get corrupted.
-	 */
-
-	/*
-	 * Replace:
-	 *   bl <tramp>  <==== will be replaced with "b 1f"
-	 *   ld r2,40(r1)
-	 *  1:
+	 * Our original call site looks like:
+	 *
+	 * bl <tramp>
+	 * ld r2,XX(r1)
+	 *
+	 * Milton Miller pointed out that we can not simply nop the branch.
+	 * If a task was preempted when calling a trace function, the nops
+	 * will remove the way to restore the TOC in r2 and the r2 TOC will
+	 * get corrupted.
+	 *
+	 * Use a b +8 to jump over the load.
 	 */
 	 */
 	op = 0x48000008;	/* b +8 */
 	op = 0x48000008;	/* b +8 */
 
 
@@ -349,19 +292,24 @@ static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 {
 	unsigned int op[2];
 	unsigned int op[2];
-	unsigned long ip = rec->ip;
+	void *ip = (void *)rec->ip;
 
 
 	/* read where this goes */
 	/* read where this goes */
-	if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
+	if (probe_kernel_read(op, ip, sizeof(op)))
 		return -EFAULT;
 		return -EFAULT;
 
 
 	/*
 	/*
-	 * It should be pointing to two nops or
-	 *  b +8; ld r2,40(r1)
+	 * We expect to see:
+	 *
+	 * b +8
+	 * ld r2,XX(r1)
+	 *
+	 * The load offset is different depending on the ABI. For simplicity
+	 * just mask it out when doing the compare.
 	 */
 	 */
-	if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
-	    ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) {
-		printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+	if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+		printk(KERN_ERR "Unexpected call sequence: %x %x\n",
+			op[0], op[1]);
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
@@ -371,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	/* create the branch to the trampoline */
-	op[0] = create_branch((unsigned int *)ip,
-			      rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
-	if (!op[0]) {
-		printk(KERN_ERR "REL24 out of range!\n");
+	/* Ensure branch is within 24 bits */
+	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+		printk(KERN_ERR "Branch out of range\n");
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
-	/* ld r2,40(r1) */
-	op[1] = 0xe8410028;
-
-	pr_devel("write to %lx\n", rec->ip);
-
-	if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
-		return -EPERM;
-
-	flush_icache_range(ip, ip + 8);
+	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+		printk(KERN_ERR "REL24 out of range!\n");
+		return -EINVAL;
+	}
 
 
 	return 0;
 	return 0;
 }
 }
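The masked compare in __ftrace_make_call() works because the two ABIs' r2 save instructions differ only in the 16-bit displacement: ld r2,40(r1) (ELFv1) encodes as 0xe8410028 and ld r2,24(r1) (ELFv2, per R2_STACK_OFFSET below) as 0xe8410018, sharing the top half 0xe841. A standalone check of that claim, using only the encodings quoted in the hunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* DS-form ld: opcode, RT and RA live in the top 16 bits, the
	 * stack displacement in the bottom 16, so masking with
	 * 0xffff0000 accepts the r2 save slot of either ABI. */
	uint32_t ld_elfv1 = 0xe8410028;	/* ld r2,40(r1) */
	uint32_t ld_elfv2 = 0xe8410018;	/* ld r2,24(r1) */

	printf("%08x %08x\n",
	       ld_elfv1 & 0xffff0000, ld_elfv2 & 0xffff0000);
	return 0;
}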

+ 59 - 58
arch/powerpc/kernel/head_64.S

@@ -70,16 +70,15 @@ _GLOBAL(__start)
 	/* NOP this out unconditionally */
 	/* NOP this out unconditionally */
 BEGIN_FTR_SECTION
 BEGIN_FTR_SECTION
 	FIXUP_ENDIAN
 	FIXUP_ENDIAN
-	b	.__start_initialization_multiplatform
+	b	__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 END_FTR_SECTION(0, 1)
 
 
 	/* Catch branch to 0 in real mode */
 	/* Catch branch to 0 in real mode */
 	trap
 	trap
 
 
-	/* Secondary processors spin on this value until it becomes nonzero.
-	 * When it does it contains the real address of the descriptor
-	 * of the function that the cpu should jump to to continue
-	 * initialization.
+	/* Secondary processors spin on this value until it becomes non-zero.
+	 * When non-zero, it contains the real address of the function the cpu
+	 * should jump to.
 	 */
 	 */
 	.balign 8
 	.balign 8
 	.globl  __secondary_hold_spinloop
 	.globl  __secondary_hold_spinloop
@@ -140,16 +139,15 @@ __secondary_hold:
 	tovirt(r26,r26)
 	tovirt(r26,r26)
 #endif
 #endif
 	/* All secondary cpus wait here until told to start. */
 	/* All secondary cpus wait here until told to start. */
-100:	ld	r4,__secondary_hold_spinloop-_stext(r26)
-	cmpdi	0,r4,0
+100:	ld	r12,__secondary_hold_spinloop-_stext(r26)
+	cmpdi	0,r12,0
 	beq	100b
 	beq	100b
 
 
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
-	tovirt(r4,r4)
+	tovirt(r12,r12)
 #endif
 #endif
-	ld	r4,0(r4)		/* deref function descriptor */
-	mtctr	r4
+	mtctr	r12
 	mr	r3,r24
 	mr	r3,r24
 	/*
 	/*
 	 * it may be the case that other platforms have r4 right to
 	 * it may be the case that other platforms have r4 right to
@@ -186,16 +184,16 @@ _GLOBAL(generic_secondary_thread_init)
 	mr	r24,r3
 	mr	r24,r3
 
 
 	/* turn on 64-bit mode */
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 
 	/* get a valid TOC pointer, wherever we're mapped at */
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 	tovirt(r2,r2)
 
 
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r3,r24
-	bl	.book3e_secondary_thread_init
+	bl	book3e_secondary_thread_init
 #endif
 #endif
 	b	generic_secondary_common_init
 	b	generic_secondary_common_init
 
 
@@ -214,17 +212,17 @@ _GLOBAL(generic_secondary_smp_init)
 	mr	r25,r4
 	mr	r25,r4
 
 
 	/* turn on 64-bit mode */
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 
 	/* get a valid TOC pointer, wherever we're mapped at */
 	/* get a valid TOC pointer, wherever we're mapped at */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 	tovirt(r2,r2)
 
 
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
 	/* Book3E initialization */
 	/* Book3E initialization */
 	mr	r3,r24
 	mr	r3,r24
 	mr	r4,r25
 	mr	r4,r25
-	bl	.book3e_secondary_core_init
+	bl	book3e_secondary_core_init
 #endif
 #endif
 
 
 generic_secondary_common_init:
 generic_secondary_common_init:
@@ -236,7 +234,7 @@ generic_secondary_common_init:
 	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
 	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
 #ifndef CONFIG_SMP
 #ifndef CONFIG_SMP
 	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
 	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
-	b	.kexec_wait		/* wait for next kernel if !SMP	 */
+	b	kexec_wait		/* wait for next kernel if !SMP	 */
 #else
 #else
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
 	lwz	r7,0(r7)		/* also the max paca allocated 	 */
 	lwz	r7,0(r7)		/* also the max paca allocated 	 */
@@ -250,7 +248,7 @@ generic_secondary_common_init:
 	blt	1b
 	blt	1b
 
 
 	mr	r3,r24			/* not found, copy phys to r3	 */
 	mr	r3,r24			/* not found, copy phys to r3	 */
-	b	.kexec_wait		/* next kernel might do better	 */
+	b	kexec_wait		/* next kernel might do better	 */
 
 
 2:	SET_PACA(r13)
 2:	SET_PACA(r13)
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
@@ -264,11 +262,13 @@ generic_secondary_common_init:
 	/* See if we need to call a cpu state restore handler */
 	/* See if we need to call a cpu state restore handler */
 	LOAD_REG_ADDR(r23, cur_cpu_spec)
 	LOAD_REG_ADDR(r23, cur_cpu_spec)
 	ld	r23,0(r23)
 	ld	r23,0(r23)
-	ld	r23,CPU_SPEC_RESTORE(r23)
-	cmpdi	0,r23,0
+	ld	r12,CPU_SPEC_RESTORE(r23)
+	cmpdi	0,r12,0
 	beq	3f
 	beq	3f
-	ld	r23,0(r23)
-	mtctr	r23
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+	ld	r12,0(r12)
+#endif
+	mtctr	r12
 	bctrl
 	bctrl
 
 
 3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
 3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
@@ -299,7 +299,7 @@ generic_secondary_common_init:
  * Assumes we're mapped EA == RA if the MMU is on.
  * Assumes we're mapped EA == RA if the MMU is on.
  */
  */
 #ifdef CONFIG_PPC_BOOK3S
 #ifdef CONFIG_PPC_BOOK3S
-_STATIC(__mmu_off)
+__mmu_off:
 	mfmsr	r3
 	mfmsr	r3
 	andi.	r0,r3,MSR_IR|MSR_DR
 	andi.	r0,r3,MSR_IR|MSR_DR
 	beqlr
 	beqlr
@@ -324,12 +324,12 @@ _STATIC(__mmu_off)
  *                 DT block, r4 is a physical pointer to the kernel itself
  *                 DT block, r4 is a physical pointer to the kernel itself
  *
  *
  */
  */
-_GLOBAL(__start_initialization_multiplatform)
+__start_initialization_multiplatform:
 	/* Make sure we are running in 64 bits mode */
 	/* Make sure we are running in 64 bits mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 
 	/* Get TOC pointer (current runtime address) */
 	/* Get TOC pointer (current runtime address) */
-	bl	.relative_toc
+	bl	relative_toc
 
 
 	/* find out where we are now */
 	/* find out where we are now */
 	bcl	20,31,$+4
 	bcl	20,31,$+4
@@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform)
 	 */
 	 */
 	cmpldi	cr0,r5,0
 	cmpldi	cr0,r5,0
 	beq	1f
 	beq	1f
-	b	.__boot_from_prom		/* yes -> prom */
+	b	__boot_from_prom		/* yes -> prom */
 1:
 1:
 	/* Save parameters */
 	/* Save parameters */
 	mr	r31,r3
 	mr	r31,r3
@@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform)
 #endif
 #endif
 
 
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
-	bl	.start_initialization_book3e
-	b	.__after_prom_start
+	bl	start_initialization_book3e
+	b	__after_prom_start
 #else
 #else
 	/* Setup some critical 970 SPRs before switching MMU off */
 	/* Setup some critical 970 SPRs before switching MMU off */
 	mfspr	r0,SPRN_PVR
 	mfspr	r0,SPRN_PVR
@@ -368,15 +368,15 @@ _GLOBAL(__start_initialization_multiplatform)
 	beq	1f
 	beq	1f
 	cmpwi	r0,0x45		/* 970GX */
 	cmpwi	r0,0x45		/* 970GX */
 	bne	2f
 	bne	2f
-1:	bl	.__cpu_preinit_ppc970
+1:	bl	__cpu_preinit_ppc970
 2:
 2:
 
 
 	/* Switch off MMU if not already off */
 	/* Switch off MMU if not already off */
-	bl	.__mmu_off
-	b	.__after_prom_start
+	bl	__mmu_off
+	b	__after_prom_start
 #endif /* CONFIG_PPC_BOOK3E */
 #endif /* CONFIG_PPC_BOOK3E */
 
 
-_INIT_STATIC(__boot_from_prom)
+__boot_from_prom:
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
 	/* Save parameters */
 	/* Save parameters */
 	mr	r31,r3
 	mr	r31,r3
@@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom)
 #ifdef CONFIG_RELOCATABLE
 #ifdef CONFIG_RELOCATABLE
 	/* Relocate code for where we are now */
 	/* Relocate code for where we are now */
 	mr	r3,r26
 	mr	r3,r26
-	bl	.relocate
+	bl	relocate
 #endif
 #endif
 
 
 	/* Restore parameters */
 	/* Restore parameters */
@@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom)
 
 
 	/* Do all of the interaction with OF client interface */
 	/* Do all of the interaction with OF client interface */
 	mr	r8,r26
 	mr	r8,r26
-	bl	.prom_init
+	bl	prom_init
 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */
 
 
 	/* We never return. We also hit that trap if trying to boot
 	/* We never return. We also hit that trap if trying to boot
 	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
 	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
 	trap
 	trap
 
 
-_STATIC(__after_prom_start)
+__after_prom_start:
 #ifdef CONFIG_RELOCATABLE
 #ifdef CONFIG_RELOCATABLE
 	/* process relocations for the final address of the kernel */
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
@@ -424,7 +424,7 @@ _STATIC(__after_prom_start)
 	bne	1f
 	bne	1f
 	add	r25,r25,r26
 	add	r25,r25,r26
 1:	mr	r3,r25
 1:	mr	r3,r25
-	bl	.relocate
+	bl	relocate
 #endif
 #endif
 
 
 /*
 /*
@@ -464,12 +464,12 @@ _STATIC(__after_prom_start)
 	lis	r5,(copy_to_here - _stext)@ha
 	lis	r5,(copy_to_here - _stext)@ha
 	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
 	addi	r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */
 
 
-	bl	.copy_and_flush		/* copy the first n bytes	 */
+	bl	copy_and_flush		/* copy the first n bytes	 */
 					/* this includes the code being	 */
 					/* this includes the code being	 */
 					/* executed here.		 */
 					/* executed here.		 */
 	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
 	addis	r8,r3,(4f - _stext)@ha	/* Jump to the copy of this code */
-	addi	r8,r8,(4f - _stext)@l	/* that we just made */
-	mtctr	r8
+	addi	r12,r8,(4f - _stext)@l	/* that we just made */
+	mtctr	r12
 	bctr
 	bctr
 
 
 .balign 8
 .balign 8
@@ -478,9 +478,9 @@ p_end:	.llong	_end - _stext
 4:	/* Now copy the rest of the kernel up to _end */
 4:	/* Now copy the rest of the kernel up to _end */
 	addis	r5,r26,(p_end - _stext)@ha
 	addis	r5,r26,(p_end - _stext)@ha
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
 	ld	r5,(p_end - _stext)@l(r5)	/* get _end */
-5:	bl	.copy_and_flush		/* copy the rest */
+5:	bl	copy_and_flush		/* copy the rest */
 
 
-9:	b	.start_here_multiplatform
+9:	b	start_here_multiplatform
 
 
 /*
 /*
  * Copy routine used to copy the kernel to start at physical address 0
  * Copy routine used to copy the kernel to start at physical address 0
@@ -544,7 +544,7 @@ __secondary_start_pmac_0:
 	
 	
 _GLOBAL(pmac_secondary_start)
 _GLOBAL(pmac_secondary_start)
 	/* turn on 64-bit mode */
 	/* turn on 64-bit mode */
-	bl	.enable_64b_mode
+	bl	enable_64b_mode
 
 
 	li	r0,0
 	li	r0,0
 	mfspr	r3,SPRN_HID4
 	mfspr	r3,SPRN_HID4
@@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start)
 	slbia
 	slbia
 
 
 	/* get TOC pointer (real address) */
 	/* get TOC pointer (real address) */
-	bl	.relative_toc
+	bl	relative_toc
 	tovirt(r2,r2)
 	tovirt(r2,r2)
 
 
 	/* Copy some CPU settings from CPU 0 */
 	/* Copy some CPU settings from CPU 0 */
-	bl	.__restore_cpu_ppc970
+	bl	__restore_cpu_ppc970
 
 
 	/* pSeries do that early though I don't think we really need it */
 	/* pSeries do that early though I don't think we really need it */
 	mfmsr	r3
 	mfmsr	r3
@@ -619,7 +619,7 @@ __secondary_start:
 	std	r14,PACAKSAVE(r13)
 	std	r14,PACAKSAVE(r13)
 
 
 	/* Do early setup for that CPU (stab, slb, hash table pointer) */
 	/* Do early setup for that CPU (stab, slb, hash table pointer) */
-	bl	.early_setup_secondary
+	bl	early_setup_secondary
 
 
 	/*
 	/*
 	 * setup the new stack pointer, but *don't* use this until
 	 * setup the new stack pointer, but *don't* use this until
@@ -639,7 +639,7 @@ __secondary_start:
 	stb	r0,PACAIRQHAPPENED(r13)
 	stb	r0,PACAIRQHAPPENED(r13)
 
 
 	/* enable MMU and jump to start_secondary */
 	/* enable MMU and jump to start_secondary */
-	LOAD_REG_ADDR(r3, .start_secondary_prolog)
+	LOAD_REG_ADDR(r3, start_secondary_prolog)
 	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
 	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
 
 
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR0,r3
@@ -652,11 +652,11 @@ __secondary_start:
  * zero the stack back-chain pointer and get the TOC virtual address
  * zero the stack back-chain pointer and get the TOC virtual address
  * before going into C code.
  * before going into C code.
  */
  */
-_GLOBAL(start_secondary_prolog)
+start_secondary_prolog:
 	ld	r2,PACATOC(r13)
 	ld	r2,PACATOC(r13)
 	li	r3,0
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	.start_secondary
+	bl	start_secondary
 	b	.
 	b	.
 /*
 /*
  * Reset stack pointer and call start_secondary
  * Reset stack pointer and call start_secondary
@@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume)
 	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
 	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
 	li	r3,0
 	li	r3,0
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
 	std	r3,0(r1)		/* Zero the stack frame pointer	*/
-	bl	.start_secondary
+	bl	start_secondary
 	b	.
 	b	.
 #endif
 #endif
 
 
 /*
 /*
  * This subroutine clobbers r11 and r12
  * This subroutine clobbers r11 and r12
  */
  */
-_GLOBAL(enable_64b_mode)
+enable_64b_mode:
 	mfmsr	r11			/* grab the current MSR */
 	mfmsr	r11			/* grab the current MSR */
 #ifdef CONFIG_PPC_BOOK3E
 #ifdef CONFIG_PPC_BOOK3E
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
 	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
@@ -715,9 +715,9 @@ p_toc:	.llong	__toc_start + 0x8000 - 0b
 /*
 /*
  * This is where the main kernel code starts.
  * This is where the main kernel code starts.
  */
  */
-_INIT_STATIC(start_here_multiplatform)
+start_here_multiplatform:
 	/* set up the TOC */
 	/* set up the TOC */
-	bl      .relative_toc
+	bl      relative_toc
 	tovirt(r2,r2)
 	tovirt(r2,r2)
 
 
 	/* Clear out the BSS. It may have been done in prom_init,
 	/* Clear out the BSS. It may have been done in prom_init,
@@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform)
 
 
 	/* Restore parameters passed from prom_init/kexec */
 	/* Restore parameters passed from prom_init/kexec */
 	mr	r3,r31
 	mr	r3,r31
-	bl	.early_setup		/* also sets r13 and SPRG_PACA */
+	bl	early_setup		/* also sets r13 and SPRG_PACA */
 
 
-	LOAD_REG_ADDR(r3, .start_here_common)
+	LOAD_REG_ADDR(r3, start_here_common)
 	ld	r4,PACAKMSR(r13)
 	ld	r4,PACAKMSR(r13)
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR0,r3
 	mtspr	SPRN_SRR1,r4
 	mtspr	SPRN_SRR1,r4
@@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform)
 	b	.	/* prevent speculative execution */
 	b	.	/* prevent speculative execution */
 	
 	
 	/* This is where all platforms converge execution */
 	/* This is where all platforms converge execution */
-_INIT_GLOBAL(start_here_common)
+
+start_here_common:
 	/* relocation is on at this point */
 	/* relocation is on at this point */
 	std	r1,PACAKSAVE(r13)
 	std	r1,PACAKSAVE(r13)
 
 
@@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common)
 	ld	r2,PACATOC(r13)
 	ld	r2,PACATOC(r13)
 
 
 	/* Do more system initializations in virtual mode */
 	/* Do more system initializations in virtual mode */
-	bl	.setup_system
+	bl	setup_system
 
 
 	/* Mark interrupts soft and hard disabled (they might be enabled
 	/* Mark interrupts soft and hard disabled (they might be enabled
 	 * in the PACA when doing hotplug)
 	 * in the PACA when doing hotplug)
@@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common)
 	stb	r0,PACAIRQHAPPENED(r13)
 	stb	r0,PACAIRQHAPPENED(r13)
 
 
 	/* Generic kernel entry */
 	/* Generic kernel entry */
-	bl	.start_kernel
+	bl	start_kernel
 
 
 	/* Not reached */
 	/* Not reached */
 	BUG_OPCODE
 	BUG_OPCODE

+ 1 - 1
arch/powerpc/kernel/idle_book3e.S

@@ -43,7 +43,7 @@ _GLOBAL(\name)
 	 */
 	 */
 #ifdef CONFIG_TRACE_IRQFLAGS
 #ifdef CONFIG_TRACE_IRQFLAGS
 	stdu    r1,-128(r1)
 	stdu    r1,-128(r1)
-	bl	.trace_hardirqs_on
+	bl	trace_hardirqs_on
 	addi    r1,r1,128
 	addi    r1,r1,128
 #endif
 #endif
 	li	r0,1
 	li	r0,1

+ 1 - 1
arch/powerpc/kernel/idle_power4.S

@@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
 	mflr	r0
 	mflr	r0
 	std	r0,16(r1)
 	std	r0,16(r1)
 	stdu    r1,-128(r1)
 	stdu    r1,-128(r1)
-	bl	.trace_hardirqs_on
+	bl	trace_hardirqs_on
 	addi    r1,r1,128
 	addi    r1,r1,128
 	ld	r0,16(r1)
 	ld	r0,16(r1)
 	mtlr	r0
 	mtlr	r0

+ 2 - 2
arch/powerpc/kernel/idle_power7.S

@@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common)
 	/* Make sure FPU, VSX etc... are flushed as we may lose
 	/* Make sure FPU, VSX etc... are flushed as we may lose
 	 * state when going to nap mode
 	 * state when going to nap mode
 	 */
 	 */
-	bl	.discard_lazy_cpu_state
+	bl	discard_lazy_cpu_state
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_SMP */
 
 
 	/* Hard disable interrupts */
 	/* Hard disable interrupts */
@@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss)
 _GLOBAL(power7_wakeup_noloss)
 _GLOBAL(power7_wakeup_noloss)
 	lbz	r0,PACA_NAPSTATELOST(r13)
 	lbz	r0,PACA_NAPSTATELOST(r13)
 	cmpwi	r0,0
 	cmpwi	r0,0
-	bne	.power7_wakeup_loss
+	bne	power7_wakeup_loss
 	ld	r1,PACAR1(r13)
 	ld	r1,PACAR1(r13)
 	ld	r4,_MSR(r1)
 	ld	r4,_MSR(r1)
 	ld	r5,_NIP(r1)
 	ld	r5,_NIP(r1)

+ 39 - 7
arch/powerpc/kernel/misc_64.S

@@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq)
 	std	r0,16(r1)
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
 	mr	r1,r3
 	mr	r1,r3
-	bl	.__do_softirq
+	bl	__do_softirq
 	ld	r1,0(r1)
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	ld	r0,16(r1)
 	mtlr	r0
 	mtlr	r0
@@ -45,7 +45,7 @@ _GLOBAL(call_do_irq)
 	std	r0,16(r1)
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
 	mr	r1,r4
 	mr	r1,r4
-	bl	.__do_irq
+	bl	__do_irq
 	ld	r1,0(r1)
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	ld	r0,16(r1)
 	mtlr	r0
 	mtlr	r0
@@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait)
 	stb	r4,PACAKEXECSTATE(r13)
 	stb	r4,PACAKEXECSTATE(r13)
 	SYNC
 	SYNC
 
 
-	b	.kexec_wait
+	b	kexec_wait
 
 
 /*
 /*
  * switch to real mode (turn mmu off)
  * switch to real mode (turn mmu off)
@@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence)
 
 
 	/* copy dest pages, flush whole dest image */
 	/* copy dest pages, flush whole dest image */
 	mr	r3,r29
 	mr	r3,r29
-	bl	.kexec_copy_flush	/* (image) */
+	bl	kexec_copy_flush	/* (image) */
 
 
 	/* turn off mmu */
 	/* turn off mmu */
 	bl	real_mode
 	bl	real_mode
@@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence)
 	mr	r4,r30		/* start, aka phys mem offset */
 	mr	r4,r30		/* start, aka phys mem offset */
 	li	r5,0x100
 	li	r5,0x100
 	li	r6,0
 	li	r6,0
-	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
+	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
 1:	/* assume normal blr return */
 1:	/* assume normal blr return */
 
 
 	/* release other cpus to the new kernel secondary start at 0x60 */
 	/* release other cpus to the new kernel secondary start at 0x60 */
@@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence)
 	stw	r6,kexec_flag-1b(5)
 	stw	r6,kexec_flag-1b(5)
 
 
 	/* clear out hardware hash page table and tlb */
 	/* clear out hardware hash page table and tlb */
-	ld	r5,0(r27)		/* deref function descriptor */
-	mtctr	r5
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+	ld	r12,0(r27)		/* deref function descriptor */
+#else
+	mr	r12,r27
+#endif
+	mtctr	r12
 	bctrl				/* ppc_md.hpte_clear_all(void); */
 	bctrl				/* ppc_md.hpte_clear_all(void); */
 
 
 /*
 /*
@@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence)
 	li	r5,0
 	li	r5,0
 	blr	/* image->start(physid, image->start, 0); */
 	blr	/* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
 #endif /* CONFIG_KEXEC */
+
+#ifdef CONFIG_MODULES
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+
+#ifdef CONFIG_MODVERSIONS
+.weak __crc_TOC.
+.section "___kcrctab+TOC.","a"
+.globl __kcrctab_TOC.
+__kcrctab_TOC.:
+	.llong	__crc_TOC.
+#endif
+
+/*
+ * Export a fake .TOC. since both modpost and depmod will complain otherwise.
+ * Both modpost and depmod strip the leading . so we do the same here.
+ */
+.section "__ksymtab_strings","a"
+__kstrtab_TOC.:
+	.asciz "TOC."
+
+.section "___ksymtab+TOC.","a"
+/* This symbol name is important: it's used by modpost to find exported syms */
+.globl __ksymtab_TOC.
+__ksymtab_TOC.:
+	.llong 0 /* .value */
+	.llong __kstrtab_TOC.
+#endif /* ELFv2 */
+#endif /* MODULES */
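The kexec hunk above is the same pattern head_64.S uses for the cpu restore hook: an ELFv1 function pointer addresses a descriptor whose first doubleword must be loaded before mtctr, while an ELFv2 pointer is the entry address itself (and travels in r12 so the callee can derive its TOC). A hedged C equivalent; call_target() is a hypothetical helper, not kernel code:

#include <stdio.h>

/* Mirror of the "#if !defined(_CALL_ELF) || _CALL_ELF != 2" deref:
 * ELFv1 loads the code address out of the descriptor (ld r12,0(r27));
 * ELFv2 uses the pointer as-is (mr r12,r27). */
static unsigned long call_target(unsigned long fn)
{
#if !defined(_CALL_ELF) || _CALL_ELF != 2
	return *(unsigned long *)fn;	/* deref function descriptor */
#else
	return fn;			/* already the entry point */
#endif
}

int main(void)
{
	unsigned long desc[3] = { 0x2000, 0x8000, 0 };	/* fake descriptor */

	/* On a non-ELFv2 build this prints 0x2000, the descriptor's
	 * first word; on an ELFv2 build it would print the pointer
	 * value itself. */
	printf("%#lx\n", call_target((unsigned long)desc));
	return 0;
}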

+ 225 - 54
arch/powerpc/kernel/module_64.c

@@ -22,6 +22,7 @@
 #include <linux/vmalloc.h>
 #include <linux/vmalloc.h>
 #include <linux/ftrace.h>
 #include <linux/ftrace.h>
 #include <linux/bug.h>
 #include <linux/bug.h>
+#include <linux/uaccess.h>
 #include <asm/module.h>
 #include <asm/module.h>
 #include <asm/firmware.h>
 #include <asm/firmware.h>
 #include <asm/code-patching.h>
 #include <asm/code-patching.h>
@@ -41,46 +42,170 @@
 #define DEBUGP(fmt , ...)
 #define DEBUGP(fmt , ...)
 #endif
 #endif
 
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define R2_STACK_OFFSET 24
+
+/* An address is simply the address of the function. */
+typedef unsigned long func_desc_t;
+
+static func_desc_t func_desc(unsigned long addr)
+{
+	return addr;
+}
+static unsigned long func_addr(unsigned long addr)
+{
+	return addr;
+}
+static unsigned long stub_func_addr(func_desc_t func)
+{
+	return func;
+}
+
+/* PowerPC64 specific values for the Elf64_Sym st_other field.  */
+#define STO_PPC64_LOCAL_BIT	5
+#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
+#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
+ (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
+
+static unsigned int local_entry_offset(const Elf64_Sym *sym)
+{
+	/* sym->st_other indicates offset to local entry point
+	 * (otherwise it will assume r12 is the address of the start
+	 * of function and try to derive r2 from it). */
+	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
+}
+#else
+#define R2_STACK_OFFSET 40
+
+/* An address is address of the OPD entry, which contains address of fn. */
+typedef struct ppc64_opd_entry func_desc_t;
+
+static func_desc_t func_desc(unsigned long addr)
+{
+	return *(struct ppc64_opd_entry *)addr;
+}
+static unsigned long func_addr(unsigned long addr)
+{
+	return func_desc(addr).funcaddr;
+}
+static unsigned long stub_func_addr(func_desc_t func)
+{
+	return func.funcaddr;
+}
+static unsigned int local_entry_offset(const Elf64_Sym *sym)
+{
+	return 0;
+}
+#endif
+
 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into
 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into
    the kernel itself).  But on PPC64, these need to be used for every
    the kernel itself).  But on PPC64, these need to be used for every
    jump, actually, to reset r2 (TOC+0x8000). */
    jump, actually, to reset r2 (TOC+0x8000). */
 struct ppc64_stub_entry
 struct ppc64_stub_entry
 {
 {
-	/* 28 byte jump instruction sequence (7 instructions) */
-	unsigned char jump[28];
-	unsigned char unused[4];
+	/* 28 byte jump instruction sequence (7 instructions). We only
+	 * need 6 instructions on ABIv2 but we always allocate 7 so
+	 * that we don't have to modify the trampoline load instruction. */
+	u32 jump[7];
+	u32 unused;
 	/* Data for the above code */
 	/* Data for the above code */
-	struct ppc64_opd_entry opd;
+	func_desc_t funcdata;
 };
 };
 
 
-/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external)
-   function which may be more than 24-bits away.  We could simply
-   patch the new r2 value and function pointer into the stub, but it's
-   significantly shorter to put these values at the end of the stub
-   code, and patch the stub address (32-bits relative to the TOC ptr,
-   r2) into the stub. */
-static struct ppc64_stub_entry ppc64_stub =
-{ .jump = {
-#ifdef __LITTLE_ENDIAN__
-	0x00, 0x00, 0x82, 0x3d, /* addis   r12,r2, <high> */
-	0x00, 0x00, 0x8c, 0x39, /* addi    r12,r12, <low> */
-	/* Save current r2 value in magic place on the stack. */
-	0x28, 0x00, 0x41, 0xf8, /* std     r2,40(r1) */
-	0x20, 0x00, 0x6c, 0xe9, /* ld      r11,32(r12) */
-	0x28, 0x00, 0x4c, 0xe8, /* ld      r2,40(r12) */
-	0xa6, 0x03, 0x69, 0x7d, /* mtctr   r11 */
-	0x20, 0x04, 0x80, 0x4e  /* bctr */
-#else
-	0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
-	0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
+/*
+ * PPC64 uses 24 bit jumps, but we need to jump into other modules or
+ * the kernel which may be further.  So we jump to a stub.
+ *
+ * For ELFv1 we need to use this to set up the new r2 value (aka TOC
+ * pointer).  For ELFv2 it's the callee's responsibility to set up the
+ * new r2, but for both we need to save the old r2.
+ *
+ * We could simply patch the new r2 value and function pointer into
+ * the stub, but it's significantly shorter to put these values at the
+ * end of the stub code, and patch the stub address (32-bits relative
+ * to the TOC ptr, r2) into the stub.
+ */
+
+static u32 ppc64_stub_insns[] = {
+	0x3d620000,			/* addis   r11,r2, <high> */
+	0x396b0000,			/* addi    r11,r11, <low> */
 	/* Save current r2 value in magic place on the stack. */
 	/* Save current r2 value in magic place on the stack. */
-	0xf8, 0x41, 0x00, 0x28, /* std     r2,40(r1) */
-	0xe9, 0x6c, 0x00, 0x20, /* ld      r11,32(r12) */
-	0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
-	0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
-	0x4e, 0x80, 0x04, 0x20  /* bctr */
+	0xf8410000|R2_STACK_OFFSET,	/* std     r2,R2_STACK_OFFSET(r1) */
+	0xe98b0020,			/* ld      r12,32(r11) */
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+	/* Set up new r2 from function descriptor */
+	0xe84b0028,			/* ld      r2,40(r11) */
+#endif
+	0x7d8903a6,			/* mtctr   r12 */
+	0x4e800420			/* bctr */
+};
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static u32 ppc64_stub_mask[] = {
+	0xffff0000,
+	0xffff0000,
+	0xffffffff,
+	0xffffffff,
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+	0xffffffff,
+#endif
+	0xffffffff,
+	0xffffffff
+};
+
+bool is_module_trampoline(u32 *p)
+{
+	unsigned int i;
+	u32 insns[ARRAY_SIZE(ppc64_stub_insns)];
+
+	BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask));
+
+	if (probe_kernel_read(insns, p, sizeof(insns)))
+		return -EFAULT;
+
+	for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
+		u32 insna = insns[i];
+		u32 insnb = ppc64_stub_insns[i];
+		u32 mask = ppc64_stub_mask[i];
+
+		if ((insna & mask) != (insnb & mask))
+			return false;
+	}
+
+	return true;
+}
+
+int module_trampoline_target(struct module *mod, u32 *trampoline,
+			     unsigned long *target)
+{
+	u32 buf[2];
+	u16 upper, lower;
+	long offset;
+	void *toc_entry;
+
+	if (probe_kernel_read(buf, trampoline, sizeof(buf)))
+		return -EFAULT;
+
+	upper = buf[0] & 0xffff;
+	lower = buf[1] & 0xffff;
+
+	/* perform the addis/addi, both signed */
+	offset = ((short)upper << 16) + (short)lower;
+
+	/*
+	 * Now get the address this trampoline jumps to. This
+	 * is always 32 bytes into our trampoline stub.
+	 */
+	toc_entry = (void *)mod->arch.toc + offset + 32;
+
+	if (probe_kernel_read(target, toc_entry, sizeof(*target)))
+		return -EFAULT;
+
+	return 0;
+}
+
 #endif
 #endif
-} };
 
 
 /* Count how many different 24-bit relocations (different symbol,
 /* Count how many different 24-bit relocations (different symbol,
    different addend) */
    different addend) */
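module_trampoline_target() above recovers the stub's r2-relative displacement by redoing the addis/addi arithmetic with signed halves. A worked example (standalone; the displacement value is made up) showing the PPC_HA/PPC_LO split round-tripping even when the low half sign-extends negative:

#include <stdio.h>

int main(void)
{
	long offset = 0x1234c8c8;	/* example stub offset from r2 */

	/* create_stub() side: PPC_HA() pre-adds 0x8000 so the
	 * sign-extended low half cancels out. */
	unsigned short upper = (offset + 0x8000) >> 16;	/* PPC_HA */
	unsigned short lower = offset & 0xffff;		/* PPC_LO */

	/* module_trampoline_target() side: both halves signed. The
	 * low half 0xc8c8 is negative as a short, which the rounded-up
	 * high half 0x1235 compensates for. */
	long back = ((short)upper << 16) + (short)lower;

	printf("offset=%#lx back=%#lx\n", offset, back);
	return 0;
}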
@@ -183,6 +308,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 	return relocs * sizeof(struct ppc64_stub_entry);
 	return relocs * sizeof(struct ppc64_stub_entry);
 }
 }
 
 
+/* Still needed for ELFv2, for .TOC. */
 static void dedotify_versions(struct modversion_info *vers,
 static void dedotify_versions(struct modversion_info *vers,
 			      unsigned long size)
 			      unsigned long size)
 {
 {
@@ -193,7 +319,7 @@ static void dedotify_versions(struct modversion_info *vers,
 			memmove(vers->name, vers->name+1, strlen(vers->name));
 			memmove(vers->name, vers->name+1, strlen(vers->name));
 }
 }
 
 
-/* Undefined symbols which refer to .funcname, hack to funcname */
+/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
 {
 	unsigned int i;
 	unsigned int i;
@@ -207,6 +333,24 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 	}
 	}
 }
 }
 
 
+static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
+			       const char *strtab,
+			       unsigned int symindex)
+{
+	unsigned int i, numsyms;
+	Elf64_Sym *syms;
+
+	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
+
+	for (i = 1; i < numsyms; i++) {
+		if (syms[i].st_shndx == SHN_UNDEF
+		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
+			return &syms[i];
+	}
+	return NULL;
+}
+
 int module_frob_arch_sections(Elf64_Ehdr *hdr,
 int module_frob_arch_sections(Elf64_Ehdr *hdr,
 			      Elf64_Shdr *sechdrs,
 			      Elf64_Shdr *sechdrs,
 			      char *secstrings,
 			      char *secstrings,
@@ -271,21 +415,12 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
 /* Patch stub to reference function and correct r2 value. */
 /* Patch stub to reference function and correct r2 value. */
 static inline int create_stub(Elf64_Shdr *sechdrs,
 static inline int create_stub(Elf64_Shdr *sechdrs,
 			      struct ppc64_stub_entry *entry,
 			      struct ppc64_stub_entry *entry,
-			      struct ppc64_opd_entry *opd,
+			      unsigned long addr,
 			      struct module *me)
 			      struct module *me)
 {
 {
-	Elf64_Half *loc1, *loc2;
 	long reladdr;
 	long reladdr;
 
 
-	*entry = ppc64_stub;
-
-#ifdef __LITTLE_ENDIAN__
-	loc1 = (Elf64_Half *)&entry->jump[0];
-	loc2 = (Elf64_Half *)&entry->jump[4];
-#else
-	loc1 = (Elf64_Half *)&entry->jump[2];
-	loc2 = (Elf64_Half *)&entry->jump[6];
-#endif
+	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
 
 
 	/* Stub uses address relative to r2. */
 	/* Stub uses address relative to r2. */
 	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
 	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -296,35 +431,33 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
 	}
 	}
 	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
 	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);
 
 
-	*loc1 = PPC_HA(reladdr);
-	*loc2 = PPC_LO(reladdr);
-	entry->opd.funcaddr = opd->funcaddr;
-	entry->opd.r2 = opd->r2;
+	entry->jump[0] |= PPC_HA(reladdr);
+	entry->jump[1] |= PPC_LO(reladdr);
+	entry->funcdata = func_desc(addr);
 	return 1;
 	return 1;
 }
 }
 
 
-/* Create stub to jump to function described in this OPD: we need the
+/* Create stub to jump to function described in this OPD/ptr: we need the
    stub to set up the TOC ptr (r2) for the function. */
    stub to set up the TOC ptr (r2) for the function. */
 static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
 static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
-				   unsigned long opdaddr,
+				   unsigned long addr,
 				   struct module *me)
 				   struct module *me)
 {
 {
 	struct ppc64_stub_entry *stubs;
 	struct ppc64_stub_entry *stubs;
-	struct ppc64_opd_entry *opd = (void *)opdaddr;
 	unsigned int i, num_stubs;
 	unsigned int i, num_stubs;
 
 
 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);
 
 
 	/* Find this stub, or if that fails, the next avail. entry */
 	/* Find this stub, or if that fails, the next avail. entry */
 	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
 	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
-	for (i = 0; stubs[i].opd.funcaddr; i++) {
+	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
 		BUG_ON(i >= num_stubs);
 		BUG_ON(i >= num_stubs);
 
 
-		if (stubs[i].opd.funcaddr == opd->funcaddr)
+		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
 			return (unsigned long)&stubs[i];
 			return (unsigned long)&stubs[i];
 	}
 	}
 
 
-	if (!create_stub(sechdrs, &stubs[i], opd, me))
+	if (!create_stub(sechdrs, &stubs[i], addr, me))
 		return 0;
 		return 0;
 
 
 	return (unsigned long)&stubs[i];
 	return (unsigned long)&stubs[i];
@@ -339,7 +472,8 @@ static int restore_r2(u32 *instruction, struct module *me)
 		       me->name, *instruction);
 		       me->name, *instruction);
 		return 0;
 		return 0;
 	}
 	}
-	*instruction = 0xe8410028;	/* ld r2,40(r1) */
+	/* ld r2,R2_STACK_OFFSET(r1) */
+	*instruction = 0xe8410000 | R2_STACK_OFFSET;
 	return 1;
 	return 1;
 }
 }
 
 
@@ -357,6 +491,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 
 
 	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
 	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
 	       sechdrs[relsec].sh_info);
+
+	/* First time we're called, we can fix up .TOC. */
+	if (!me->arch.toc_fixed) {
+		sym = find_dot_toc(sechdrs, strtab, symindex);
+		/* It's theoretically possible that a module doesn't want a
+		 * .TOC. so don't fail it just for that. */
+		if (sym)
+			sym->st_value = my_r2(sechdrs, me);
+		me->arch.toc_fixed = true;
+	}
+
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
 		/* This is where to make the change */
 		/* This is where to make the change */
 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -453,7 +598,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 					return -ENOENT;
 					return -ENOENT;
 				if (!restore_r2((u32 *)location + 1, me))
 				if (!restore_r2((u32 *)location + 1, me))
 					return -ENOEXEC;
 					return -ENOEXEC;
-			}
+			} else
+				value += local_entry_offset(sym);
 
 
 			/* Convert value to relative */
 			/* Convert value to relative */
 			value -= (unsigned long)location;
 			value -= (unsigned long)location;
@@ -474,6 +620,31 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 			*location = value - (unsigned long)location;
 			*location = value - (unsigned long)location;
 			break;
 			break;
 
 
+		case R_PPC64_TOCSAVE:
+			/*
+			 * Marker reloc indicates we don't have to save r2.
+			 * That would only save us one instruction, so ignore
+			 * it.
+			 */
+			break;
+
+		case R_PPC64_REL16_HA:
+			/* Subtract location pointer */
+			value -= (unsigned long)location;
+			value = ((value + 0x8000) >> 16);
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
+		case R_PPC64_REL16_LO:
+			/* Subtract location pointer */
+			value -= (unsigned long)location;
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
 		default:
 		default:
 			printk("%s: Unknown ADD relocation: %lu\n",
 			printk("%s: Unknown ADD relocation: %lu\n",
 			       me->name,
 			       me->name,
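For reference, the local_entry_offset() adjustment applied in apply_relocate_add() above decodes bits 5-7 of st_other through PPC64_LOCAL_ENTRY_OFFSET(). Tabulating the macro (copied from the earlier hunk into a standalone program) shows the possible global-to-local entry distances; 3 (8 bytes, the two-instruction TOC setup) is the common ELFv2 case:

#include <stdio.h>

#define STO_PPC64_LOCAL_BIT	5
#define STO_PPC64_LOCAL_MASK	(7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other)					\
 (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)

int main(void)
{
	int v;

	/* Encoded value 3 means the local entry point (just past the
	 * addis/addi pair that derives r2 from r12) sits 8 bytes after
	 * the global entry point. */
	for (v = 0; v < 8; v++)
		printf("st_other bits %d -> local entry offset %d\n",
		       v, PPC64_LOCAL_ENTRY_OFFSET(v << STO_PPC64_LOCAL_BIT));
	return 0;
}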

+ 5 - 12
arch/powerpc/kernel/process.c

@@ -54,6 +54,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #endif
+#include <asm/code-patching.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
@@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
-		childregs->gpr[14] = usp;	/* function */
+		/* function */
+		if (usp)
+			childregs->gpr[14] = ppc_function_entry((void *)usp);
 #ifdef CONFIG_PPC64
 		clear_tsk_thread_flag(p, TIF_32BIT);
 		childregs->softe = 1;
@@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
 		p->thread.ppr = INIT_PPR;
 #endif
-	/*
-	 * The PPC64 ABI makes use of a TOC to contain function 
-	 * pointers.  The function (ret_from_except) is actually a pointer
-	 * to the TOC entry.  The first entry is a pointer to the actual
-	 * function.
-	 */
-#ifdef CONFIG_PPC64
-	kregs->nip = *((unsigned long *)f);
-#else
-	kregs->nip = (unsigned long)f;
-#endif
+	kregs->nip = ppc_function_entry(f);
 	return 0;
 }
 

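copy_thread() now leans on ppc_function_entry() instead of open-coding the ABIv1 descriptor dereference. Roughly, and only as a sketch of what that helper (from asm/code-patching.h) abstracts, the two ABIs differ like this:

	/* Sketch, not the kernel's implementation: under ABIv1 a function
	 * symbol names an "opd" descriptor whose first doubleword is the
	 * real text address; under ABIv2 the symbol is the text address. */
	struct func_desc {
		unsigned long funcaddr;	/* entry point                 */
		unsigned long toc;	/* callee's TOC pointer        */
		unsigned long env;	/* environment (unused from C) */
	};

	static unsigned long function_entry_sketch(void *func)
	{
	#if defined(_CALL_ELF) && _CALL_ELF == 2	/* ABIv2 */
		return (unsigned long)func;
	#else						/* ABIv1 */
		return ((struct func_desc *)func)->funcaddr;
	#endif
	}

The same helper replaces the open-coded *(unsigned long *)func dereferences in setup_64.c and the smp_startup_cpu()/kick_cpu() paths further down.
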
+ 1 - 1
arch/powerpc/kernel/prom_init_check.sh

@@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
 boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display"
+btext_setup_display TOC."
 
 NM="$1"
 OBJ="$2"

+ 1 - 1
arch/powerpc/kernel/setup_64.c

@@ -341,7 +341,7 @@ void smp_release_cpus(void)
 
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = __pa(generic_secondary_smp_init);
+	*ptr = ppc_function_entry(generic_secondary_smp_init);
 
 	/* And wait a bit for them to catch up */
 	for (i = 0; i < 100000; i++) {

+ 11 - 7
arch/powerpc/kernel/systbl.S

@@ -17,12 +17,12 @@
 #include <asm/ppc_asm.h>
 
 #ifdef CONFIG_PPC64
-#define SYSCALL(func)		.llong	.sys_##func,.sys_##func
-#define COMPAT_SYS(func)	.llong	.sys_##func,.compat_sys_##func
-#define PPC_SYS(func)		.llong	.ppc_##func,.ppc_##func
-#define OLDSYS(func)		.llong	.sys_ni_syscall,.sys_ni_syscall
-#define SYS32ONLY(func)		.llong	.sys_ni_syscall,.compat_sys_##func
-#define SYSX(f, f3264, f32)	.llong	.f,.f3264
+#define SYSCALL(func)		.llong	DOTSYM(sys_##func),DOTSYM(sys_##func)
+#define COMPAT_SYS(func)	.llong	DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
+#define PPC_SYS(func)		.llong	DOTSYM(ppc_##func),DOTSYM(ppc_##func)
+#define OLDSYS(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
+#define SYS32ONLY(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define SYSX(f, f3264, f32)	.llong	DOTSYM(f),DOTSYM(f3264)
 #else
 #define SYSCALL(func)		.long	sys_##func
 #define COMPAT_SYS(func)	.long	sys_##func
@@ -36,6 +36,8 @@
 #define PPC_SYS_SPU(func)	PPC_SYS(func)
 #define SYSX_SPU(f, f3264, f32)	SYSX(f, f3264, f32)
 
+.section .rodata,"a"
+
 #ifdef CONFIG_PPC64
 #define sys_sigpending	sys_ni_syscall
 #define sys_old_getrlimit sys_ni_syscall
@@ -43,5 +45,7 @@
 	.p2align	3
 #endif
 
-_GLOBAL(sys_call_table)
+.globl sys_call_table
+sys_call_table:
+
 #include <asm/systbl.h>

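DOTSYM() lets one table source serve both ABIs: it resolves to the dot-prefixed text entry (.sys_foo) under ABIv1 and to the plain symbol under ABIv2, where dot symbols no longer exist. Assuming the definition this series adds to asm/ppc_asm.h, it is approximately:

	/* Approximate definition, for .S files (see asm/ppc_asm.h) */
	#define GLUE(a, b)	a##b
	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define DOTSYM(a)	a		/* ABIv2: no dot symbols  */
	#else
	#define DOTSYM(a)	GLUE(.,a)	/* ABIv1: .sys_foo et al. */
	#endif
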
+ 6 - 7
arch/powerpc/kernel/tm.S

@@ -42,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);		\
 /* Stack frame offsets for local variables. */
 #define TM_FRAME_L0	TM_FRAME_SIZE-16
 #define TM_FRAME_L1	TM_FRAME_SIZE-8
-#define STACK_PARAM(x)	(48+((x)*8))
 
 
 /* In order to access the TM SPRs, TM must be enabled.  So, do so: */
@@ -109,12 +108,12 @@ _GLOBAL(tm_reclaim)
 	mflr	r0
 	stw	r6, 8(r1)
 	std	r0, 16(r1)
-	std	r2, 40(r1)
+	std	r2, STK_GOT(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */
 
-	std	r3, STACK_PARAM(0)(r1)
+	std	r3, STK_PARAM(R3)(r1)
 	SAVE_NVGPRS(r1)
 
 	/* We need to setup MSR for VSX register save instructions.  Here we
@@ -210,7 +209,7 @@ dont_backup_fp:
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''    ''   */
-	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
+	ld	r12, STK_PARAM(R3)(r1)		/* Param 0, thread_struct * */
 
 	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */
 
@@ -297,7 +296,7 @@ dont_backup_fp:
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
-	ld	r2, 40(r1)
+	ld	r2, STK_GOT(r1)
 
 	/* Load system default DSCR */
 	ld	r4, DSCR_DEFAULT@toc(r2)
@@ -320,7 +319,7 @@ _GLOBAL(__tm_recheckpoint)
 	mflr	r0
 	stw	r5, 8(r1)
 	std	r0, 16(r1)
-	std	r2, 40(r1)
+	std	r2, STK_GOT(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)
 
 	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
@@ -478,7 +477,7 @@ restore_gprs:
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
-	ld	r2, 40(r1)
+	ld	r2, STK_GOT(r1)
 
 	/* Load system default DSCR */
 	ld	r4, DSCR_DEFAULT@toc(r2)

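The hardcoded 40(r1) slots and the local STACK_PARAM() macro give way to STK_GOT and STK_PARAM(): both the TOC save slot and the parameter save area sit at different offsets in the ABIv2 stack frame, so the magic numbers had to go. Assuming the values the series introduces in asm/ppc_asm.h (treat the exact numbers here as a paraphrase, not a quote):

	#if defined(_CALL_ELF) && _CALL_ELF == 2
	#define STK_GOT		24			/* TOC (r2) save slot */
	#define __STK_PARAM(i)	(32 + ((i) - 3) * 8)	/* parameter save     */
	#else
	#define STK_GOT		40
	#define __STK_PARAM(i)	(48 + ((i) - 3) * 8)
	#endif
	#define STK_PARAM(i)	__STK_PARAM(__REG_##i)	/* STK_PARAM(R3), ... */
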
+ 1 - 1
arch/powerpc/kvm/book3s_hv_interrupts.S

@@ -171,7 +171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 #endif /* CONFIG_SMP */
 
 	/* Jump to partition switch code */
-	bl	.kvmppc_hv_entry_trampoline
+	bl	kvmppc_hv_entry_trampoline
 	nop
 
 /*

+ 17 - 17
arch/powerpc/kvm/book3s_hv_rmhandlers.S

@@ -1658,7 +1658,7 @@ kvmppc_hdsi:
 	/* Search the hash table. */
 	mr	r3, r9			/* vcpu pointer */
 	li	r7, 1			/* data fault */
-	bl	.kvmppc_hpte_hv_fault
+	bl	kvmppc_hpte_hv_fault
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -1732,7 +1732,7 @@ kvmppc_hisi:
 	mr	r4, r10
 	mr	r6, r11
 	li	r7, 0			/* instruction fault */
-	bl	.kvmppc_hpte_hv_fault
+	bl	kvmppc_hpte_hv_fault
 	ld	r9, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_PC(r9)
 	ld	r11, VCPU_MSR(r9)
@@ -1806,16 +1806,16 @@ hcall_real_fallback:
 	.globl	hcall_real_table
 hcall_real_table:
 	.long	0		/* 0 - unused */
-	.long	.kvmppc_h_remove - hcall_real_table
-	.long	.kvmppc_h_enter - hcall_real_table
-	.long	.kvmppc_h_read - hcall_real_table
+	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
+	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
+	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
 	.long	0		/* 0x10 - H_CLEAR_MOD */
 	.long	0		/* 0x14 - H_CLEAR_REF */
-	.long	.kvmppc_h_protect - hcall_real_table
-	.long	.kvmppc_h_get_tce - hcall_real_table
-	.long	.kvmppc_h_put_tce - hcall_real_table
+	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
+	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
+	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
-	.long	.kvmppc_h_set_dabr - hcall_real_table
+	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
 	.long	0		/* 0x2c */
 	.long	0		/* 0x30 */
 	.long	0		/* 0x34 */
@@ -1831,11 +1831,11 @@ hcall_real_table:
 	.long	0		/* 0x5c */
 	.long	0		/* 0x60 */
 #ifdef CONFIG_KVM_XICS
-	.long	.kvmppc_rm_h_eoi - hcall_real_table
-	.long	.kvmppc_rm_h_cppr - hcall_real_table
-	.long	.kvmppc_rm_h_ipi - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
 	.long	0		/* 0x70 - H_IPOLL */
-	.long	.kvmppc_rm_h_xirr - hcall_real_table
+	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
 #else
 	.long	0		/* 0x64 - H_EOI */
 	.long	0		/* 0x68 - H_CPPR */
@@ -1869,7 +1869,7 @@ hcall_real_table:
 	.long	0		/* 0xd4 */
 	.long	0		/* 0xd8 */
 	.long	0		/* 0xdc */
-	.long	.kvmppc_h_cede - hcall_real_table
+	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
 	.long	0		/* 0xe4 */
 	.long	0		/* 0xe8 */
 	.long	0		/* 0xec */
@@ -1886,11 +1886,11 @@ hcall_real_table:
 	.long	0		/* 0x118 */
 	.long	0		/* 0x11c */
 	.long	0		/* 0x120 */
-	.long	.kvmppc_h_bulk_remove - hcall_real_table
+	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
 	.long	0		/* 0x128 */
 	.long	0		/* 0x12c */
 	.long	0		/* 0x130 */
-	.long	.kvmppc_h_set_xdabr - hcall_real_table
+	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
 hcall_real_table_end:
 
 ignore_hdec:
@@ -2115,7 +2115,7 @@ kvm_cede_exit:
 	/* Try to handle a machine check in real mode */
 machine_check_realmode:
 	mr	r3, r9		/* get vcpu pointer */
-	bl	.kvmppc_realmode_machine_check
+	bl	kvmppc_realmode_machine_check
 	nop
 	cmpdi	r3, 0		/* continue exiting from guest? */
 	ld	r9, HSTATE_KVM_VCPU(r13)

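hcall_real_table stores 32-bit offsets from the table base rather than absolute addresses, so DOTSYM() is all that is needed to keep the entries pointing at real text on either ABI. The dispatch lives in assembly above the table; rendered in C purely as an illustration (names invented for the sketch):

	#include <stddef.h>
	#include <stdint.h>

	extern int32_t hcall_real_table[];	/* 0 means "not implemented" */

	typedef long (*hcall_handler_t)(void);

	static hcall_handler_t hcall_lookup_sketch(unsigned int opcode)
	{
		/* hcall numbers are multiples of 4, so the number doubles as
		 * the byte index into a table of 32-bit entries */
		int32_t off = hcall_real_table[opcode / 4];

		if (!off)
			return NULL;
		/* only the distance from the table base matters, never an
		 * absolute address, which keeps the table position independent */
		return (hcall_handler_t)((char *)hcall_real_table + off);
	}
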
+ 1 - 1
arch/powerpc/lib/copypage_64.S

@@ -20,7 +20,7 @@ _GLOBAL(copy_page)
 BEGIN_FTR_SECTION
 	lis	r5,PAGE_SIZE@h
 FTR_SECTION_ELSE
-	b	.copypage_power7
+	b	copypage_power7
 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	ori	r5,r5,PAGE_SIZE@l
 BEGIN_FTR_SECTION

+ 6 - 6
arch/powerpc/lib/copypage_power7.S

@@ -56,15 +56,15 @@ _GLOBAL(copypage_power7)
 
 #ifdef CONFIG_ALTIVEC
 	mflr	r0
-	std	r3,48(r1)
-	std	r4,56(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
 	mtlr	r0
 
 	li	r0,(PAGE_SIZE/128)
@@ -103,7 +103,7 @@ _GLOBAL(copypage_power7)
 	addi	r3,r3,128
 	bdnz	1b
 
-	b	.exit_vmx_copy		/* tail call optimise */
+	b	exit_vmx_copy		/* tail call optimise */
 
 #else
 	li	r0,(PAGE_SIZE/128)

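The retired 48(r1)/56(r1) slots were ABIv1's caller parameter save area; ABIv2 callers are not required to allocate one, so scribbling there can corrupt the caller's frame. The replacement expression parks values just below the stack pointer instead, and the same bytes become ordinary STK_REG(Rn)(r1) slots once the stdu creates the frame. With the constants already visible in this diff (STACKFRAMESIZE 256 and STK_REG(i) = 112 + (i-14)*8, see the selftests header at the end), the offsets work out to:

	#include <assert.h>

	#define STACKFRAMESIZE	256
	#define STK_REG(i)	(112 + ((i) - 14) * 8)

	int main(void)
	{
		assert(-STACKFRAMESIZE + STK_REG(31) == -8);	/* r3 stash */
		assert(-STACKFRAMESIZE + STK_REG(30) == -16);	/* r4 stash */
		assert(-STACKFRAMESIZE + STK_REG(29) == -24);	/* r5 stash */
		return 0;
	}

i.e. the top few doublewords of the red zone below r1, which the 64-bit ELF ABI reserves for the function. The same pattern repeats in copyuser_power7.S, memcpy_64.S and memcpy_power7.S below.
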
+ 1 - 1
arch/powerpc/lib/copyuser_64.S

@@ -18,7 +18,7 @@
 #endif
 
 	.align	7
-_GLOBAL(__copy_tofrom_user)
+_GLOBAL_TOC(__copy_tofrom_user)
 BEGIN_FTR_SECTION
 	nop
 FTR_SECTION_ELSE

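__copy_tofrom_user (and memcpy further down) switch from _GLOBAL() to _GLOBAL_TOC() because they can be reached from modules, whose call stubs arrive with r12 pointing at the function but r2 not yet set up for the kernel's TOC. On ABIv2 the macro emits a global entry point that derives r2 from r12 before falling through to the local entry point. Trimmed from the ppc_asm.h hunk earlier in this series (treat the details as approximate):

	#define _GLOBAL_TOC(name)			\
		.globl name;				\
	name:						\
	0:	addis r2,r12,(.TOC.-0b)@ha;		\
		addi  r2,r2,(.TOC.-0b)@l;		\
		.localentry name,.-name
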
+ 16 - 16
arch/powerpc/lib/copyuser_power7.S

@@ -66,7 +66,7 @@
 	ld	r15,STK_REG(R15)(r1)
 	ld	r14,STK_REG(R14)(r1)
 .Ldo_err3:
-	bl	.exit_vmx_usercopy
+	bl	exit_vmx_usercopy
 	ld	r0,STACKFRAMESIZE+16(r1)
 	mtlr	r0
 	b	.Lexit
@@ -85,9 +85,9 @@
 .Lexit:
 	addi	r1,r1,STACKFRAMESIZE
 .Ldo_err1:
-	ld	r3,48(r1)
-	ld	r4,56(r1)
-	ld	r5,64(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	ld	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	ld	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	b	__copy_tofrom_user_base
 
 
@@ -96,18 +96,18 @@ _GLOBAL(__copy_tofrom_user_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
 
-	std	r3,48(r1)
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
 
-	std	r3,48(r1)
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 
 	blt	.Lshort_copy
 #endif
@@ -295,12 +295,12 @@ err1;	stb	r0,0(r3)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_usercopy
+	bl	enter_vmx_usercopy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -514,7 +514,7 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -717,5 +717,5 @@ err3;	lbz	r0,0(r4)
 err3;	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	b	.exit_vmx_usercopy	/* tail call optimise */
+	b	exit_vmx_usercopy	/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */

+ 4 - 4
arch/powerpc/lib/hweight_64.S

@@ -24,7 +24,7 @@
 
 _GLOBAL(__arch_hweight8)
 BEGIN_FTR_SECTION
-	b .__sw_hweight8
+	b __sw_hweight8
 	nop
 	nop
 FTR_SECTION_ELSE
@@ -35,7 +35,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 
 _GLOBAL(__arch_hweight16)
 BEGIN_FTR_SECTION
-	b .__sw_hweight16
+	b __sw_hweight16
 	nop
 	nop
 	nop
@@ -57,7 +57,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 
 _GLOBAL(__arch_hweight32)
 BEGIN_FTR_SECTION
-	b .__sw_hweight32
+	b __sw_hweight32
 	nop
 	nop
 	nop
@@ -82,7 +82,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
 
 _GLOBAL(__arch_hweight64)
 BEGIN_FTR_SECTION
-	b .__sw_hweight64
+	b __sw_hweight64
 	nop
 	nop
 	nop

+ 2 - 2
arch/powerpc/lib/mem_64.S

@@ -79,8 +79,8 @@ _GLOBAL(memset)
 
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
-	bgt	.backwards_memcpy
-	b	.memcpy
+	bgt	backwards_memcpy
+	b	memcpy
 
 _GLOBAL(backwards_memcpy)
 	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */

+ 5 - 5
arch/powerpc/lib/memcpy_64.S

@@ -10,12 +10,12 @@
 #include <asm/ppc_asm.h>
 
 	.align	7
-_GLOBAL(memcpy)
+_GLOBAL_TOC(memcpy)
 BEGIN_FTR_SECTION
 #ifdef __LITTLE_ENDIAN__
 	cmpdi	cr7,r5,0
 #else
-	std	r3,48(r1)	/* save destination pointer for return value */
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* save destination pointer for return value */
 #endif
 FTR_SECTION_ELSE
 #ifndef SELFTEST
@@ -88,7 +88,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+3,3f
 	lbz	r9,8(r4)
 	stb	r9,0(r3)
-3:	ld	r3,48(r1)	/* return dest pointer */
+3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 
 .Lsrc_unaligned:
@@ -171,7 +171,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 2:	bf	cr7*4+3,3f
 	rotldi	r9,r9,8
 	stb	r9,0(r3)
-3:	ld	r3,48(r1)	/* return dest pointer */
+3:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 
 .Ldst_unaligned:
@@ -216,6 +216,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 3:	bf	cr7*4+3,4f
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
-4:	ld	r3,48(r1)	/* return dest pointer */
+4:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)	/* return dest pointer */
 	blr
 #endif

+ 13 - 13
arch/powerpc/lib/memcpy_power7.S

@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
 
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
 
-	std	r3,48(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 #endif
@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
 
-15:	ld	r3,48(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 
 .Lunwind_stack_nonvmx_copy:
@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
 #ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
 	mflr	r0
-	std	r4,56(r1)
-	std	r5,64(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
-	bl	.enter_vmx_copy
+	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+48(r1)
-	ld	r4,STACKFRAMESIZE+56(r1)
-	ld	r5,STACKFRAMESIZE+64(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -447,8 +447,8 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
 	/* Get the destination 16B aligned */
@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,48(r1)
-	b	.exit_vmx_copy		/* tail call optimise */
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
+	b	exit_vmx_copy		/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */

+ 28 - 16
arch/powerpc/mm/hash_low_64.S

@@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -201,7 +201,8 @@ htab_insert_pte:
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert1)
+.globl htab_call_hpte_insert1
+htab_call_hpte_insert1:
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	htab_pte_insert_ok	/* Insertion successful */
@@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert2)
+.globl htab_call_hpte_insert2
+htab_call_hpte_insert2:
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	htab_pte_insert_ok	/* Insertion successful */
@@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */	
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(htab_call_hpte_remove)
+.globl htab_call_hpte_remove
+htab_call_hpte_remove:
 	bl	.			/* Patched by htab_finish_init() */
 
 	/* Try all again */
@@ -296,7 +299,8 @@ htab_modify_pte:
 	li	r7,MMU_PAGE_4K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(htab_call_hpte_updatepp)
+.globl htab_call_hpte_updatepp
+htab_call_hpte_updatepp:
 	bl	.			/* Patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here
@@ -471,7 +475,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -526,7 +530,8 @@ htab_special_pfn:
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert1)
+.globl htab_call_hpte_insert1
+htab_call_hpte_insert1:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	htab_pte_insert_ok	/* Insertion successful */
@@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert2)
+.globl htab_call_hpte_insert2
+htab_call_hpte_insert2:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	htab_pte_insert_ok	/* Insertion successful */
@@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(htab_call_hpte_remove)
+.globl htab_call_hpte_remove
+htab_call_hpte_remove:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* Try all again */
@@ -588,7 +595,7 @@ htab_inval_old_hpte:
 	li	r6,MMU_PAGE_64K		/* psize */
 	ld	r7,STK_PARAM(R9)(r1)	/* ssize */
 	ld	r8,STK_PARAM(R8)(r1)	/* local */
-	bl	.flush_hash_page
+	bl	flush_hash_page
 	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
 	lis	r0,_PAGE_HPTE_SUB@h
 	ori	r0,r0,_PAGE_HPTE_SUB@l
@@ -660,7 +667,8 @@ htab_modify_pte:
 	li	r7,MMU_PAGE_4K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(htab_call_hpte_updatepp)
+.globl htab_call_hpte_updatepp
+htab_call_hpte_updatepp:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here
@@ -812,7 +820,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -857,7 +865,8 @@ ht64_insert_pte:
 	li	r8,MMU_PAGE_64K
 	li	r9,MMU_PAGE_64K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(ht64_call_hpte_insert1)
+.globl ht64_call_hpte_insert1
+ht64_call_hpte_insert1:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	ht64_pte_insert_ok	/* Insertion successful */
@@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1)
 	li	r8,MMU_PAGE_64K
 	li	r9,MMU_PAGE_64K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(ht64_call_hpte_insert2)
+.globl ht64_call_hpte_insert2
+ht64_call_hpte_insert2:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	ht64_pte_insert_ok	/* Insertion successful */
@@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(ht64_call_hpte_remove)
+.globl ht64_call_hpte_remove
+ht64_call_hpte_remove:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* Try all again */
@@ -952,7 +963,8 @@ ht64_modify_pte:
 	li	r7,MMU_PAGE_64K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(ht64_call_hpte_updatepp)
+.globl ht64_call_hpte_updatepp
+ht64_call_hpte_updatepp:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here

+ 16 - 20
arch/powerpc/mm/hash_utils_64.c

@@ -622,47 +622,43 @@ int remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#define FUNCTION_TEXT(A)	((*(unsigned long *)(A)))
+extern u32 htab_call_hpte_insert1[];
+extern u32 htab_call_hpte_insert2[];
+extern u32 htab_call_hpte_remove[];
+extern u32 htab_call_hpte_updatepp[];
+extern u32 ht64_call_hpte_insert1[];
+extern u32 ht64_call_hpte_insert2[];
+extern u32 ht64_call_hpte_remove[];
+extern u32 ht64_call_hpte_updatepp[];
 
 static void __init htab_finish_init(void)
 {
-	extern unsigned int *htab_call_hpte_insert1;
-	extern unsigned int *htab_call_hpte_insert2;
-	extern unsigned int *htab_call_hpte_remove;
-	extern unsigned int *htab_call_hpte_updatepp;
-
 #ifdef CONFIG_PPC_HAS_HASH_64K
-	extern unsigned int *ht64_call_hpte_insert1;
-	extern unsigned int *ht64_call_hpte_insert2;
-	extern unsigned int *ht64_call_hpte_remove;
-	extern unsigned int *ht64_call_hpte_updatepp;
-
 	patch_branch(ht64_call_hpte_insert1,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_insert2,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_remove,
-		FUNCTION_TEXT(ppc_md.hpte_remove),
+		ppc_function_entry(ppc_md.hpte_remove),
 		BRANCH_SET_LINK);
 	patch_branch(ht64_call_hpte_updatepp,
-		FUNCTION_TEXT(ppc_md.hpte_updatepp),
+		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-
 #endif /* CONFIG_PPC_HAS_HASH_64K */
 
 	patch_branch(htab_call_hpte_insert1,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_insert2,
-		FUNCTION_TEXT(ppc_md.hpte_insert),
+		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_remove,
-		FUNCTION_TEXT(ppc_md.hpte_remove),
+		ppc_function_entry(ppc_md.hpte_remove),
 		BRANCH_SET_LINK);
 	patch_branch(htab_call_hpte_updatepp,
-		FUNCTION_TEXT(ppc_md.hpte_updatepp),
+		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
 }
 

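htab_finish_init() can now hand ppc_function_entry() results straight to patch_branch(), which assembles a relative bl over the `bl .` placeholders declared in hash_low_64.S above. For orientation only, the instruction it writes is shaped like this (simplified; the kernel's create_branch() in lib/code-patching.c also range-checks the ±32MB displacement and flushes the icache):

	#include <stdint.h>

	#define BRANCH_SET_LINK	0x1	/* LK bit: branch and link */

	static uint32_t rel_branch_sketch(const uint32_t *site,
					  unsigned long target, int flags)
	{
		long offset = (long)target - (long)site;

		/* 0x48000000 is the I-form branch major opcode (18 << 26) */
		return 0x48000000 | (offset & 0x03fffffc) | (flags & BRANCH_SET_LINK);
	}
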
+ 6 - 6
arch/powerpc/mm/slb.c

@@ -256,10 +256,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 	patch_instruction(insn_addr, insn);
 }
 
+extern u32 slb_compare_rr_to_size[];
+extern u32 slb_miss_kernel_load_linear[];
+extern u32 slb_miss_kernel_load_io[];
+extern u32 slb_compare_rr_to_size[];
+extern u32 slb_miss_kernel_load_vmemmap[];
+
 void slb_set_size(u16 size)
 {
-	extern unsigned int *slb_compare_rr_to_size;
-
 	if (mmu_slb_size == size)
 		return;
 
@@ -272,11 +276,7 @@ void slb_initialize(void)
 	unsigned long linear_llp, vmalloc_llp, io_llp;
 	unsigned long lflags, vflags;
 	static int slb_encoding_inited;
-	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_io;
-	extern unsigned int *slb_compare_rr_to_size;
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	extern unsigned int *slb_miss_kernel_load_vmemmap;
 	unsigned long vmemmap_llp;
 #endif
 

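The declaration change above is load-bearing, not cosmetic. Under ABIv1, _GLOBAL() made each of these symbols a function descriptor, so the old `extern unsigned int *sym;` form worked by accident of layout: reading the "pointer" fetched the descriptor's first doubleword, which is the real text address. With descriptors gone under ABIv2 the labels are plain code addresses, and the array form is the honest way to name them:

	/* ABIv2: an asm label is a location; the array form decays to it. */
	extern u32 slb_compare_rr_to_size[];	/* == address of the patch site */

	/* ABIv1 idiom this replaces: the symbol named an opd descriptor, so
	 * loading it as a pointer happened to yield the text address:
	 *	extern unsigned int *slb_compare_rr_to_size;
	 */
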
+ 8 - 4
arch/powerpc/mm/slb_low.S

@@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode)
 	/* Linear mapping encoding bits, the "li" instruction below will
 	 * be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_linear)
+.globl slb_miss_kernel_load_linear
+slb_miss_kernel_load_linear:
 	li	r11,0
 	/*
 	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
@@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	/* Check virtual memmap region. To be patches at kernel boot */
 	cmpldi	cr0,r9,0xf
 	bne	1f
-_GLOBAL(slb_miss_kernel_load_vmemmap)
+.globl slb_miss_kernel_load_vmemmap
+slb_miss_kernel_load_vmemmap:
 	li	r11,0
 	b	6f
 1:
@@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 	b	6f
 5:
 	/* IO mapping */
-	_GLOBAL(slb_miss_kernel_load_io)
+.globl slb_miss_kernel_load_io
+slb_miss_kernel_load_io:
 	li	r11,0
 6:
 	/*
@@ -250,7 +253,8 @@ slb_finish_load:
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
 	/* This gets soft patched on boot. */
-_GLOBAL(slb_compare_rr_to_size)
+.globl slb_compare_rr_to_size
+slb_compare_rr_to_size:
 	cmpldi	r10,0
 
 	blt+	4f

+ 2 - 1
arch/powerpc/platforms/85xx/smp.c

@@ -27,6 +27,7 @@
 #include <asm/cacheflush.h>
 #include <asm/dbell.h>
 #include <asm/fsl_guts.h>
+#include <asm/code-patching.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/mpic.h>
@@ -267,7 +268,7 @@ out:
 	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
 	out_be64((u64 *)(&spin_table->addr_h),
-	  __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
+		__pa(ppc_function_entry(generic_secondary_smp_init)));
 	flush_spin_table(spin_table);
 #endif
 

+ 3 - 2
arch/powerpc/platforms/cell/smp.c

@@ -40,6 +40,7 @@
 #include <asm/firmware.h>
 #include <asm/rtas.h>
 #include <asm/cputhreads.h>
+#include <asm/code-patching.h>
 
 #include "interrupt.h"
 #include <asm/udbg.h>
@@ -70,8 +71,8 @@ static cpumask_t of_spin_map;
 static inline int smp_startup_cpu(unsigned int lcpu)
 {
 	int status;
-	unsigned long start_here = __pa((u32)*((unsigned long *)
-					       generic_secondary_smp_init));
+	unsigned long start_here =
+			__pa(ppc_function_entry(generic_secondary_smp_init));
 	unsigned int pcpu;
 	int start_cpu;
 

+ 1 - 1
arch/powerpc/platforms/pasemi/powersave.S

@@ -66,7 +66,7 @@ sleep_common:
 	std	r3, 48(r1)
 
 	/* Only do power savings when in astate 0 */
-	bl	.check_astate
+	bl	check_astate
 	cmpwi	r3,0
 	bne	1f
 

+ 2 - 0
arch/powerpc/platforms/powernv/opal-takeover.S

@@ -21,11 +21,13 @@
 _GLOBAL(opal_query_takeover)
 	mfcr	r0
 	stw	r0,8(r1)
+	stdu	r1,-STACKFRAMESIZE(r1)
 	std	r3,STK_PARAM(R3)(r1)
 	std	r4,STK_PARAM(R4)(r1)
 	li	r3,H_HAL_TAKEOVER
 	li	r4,H_HAL_TAKEOVER_QUERY_MAGIC
 	HVSC
+	addi	r1,r1,STACKFRAMESIZE
 	ld	r10,STK_PARAM(R3)(r1)
 	std	r4,0(r10)
 	ld	r10,STK_PARAM(R4)(r1)

+ 2 - 2
arch/powerpc/platforms/powernv/opal-wrappers.S

@@ -32,7 +32,7 @@
 	std	r12,PACASAVEDMSR(r13);	\
 	andc	r12,r12,r0;		\
 	mtmsrd	r12,1;			\
-	LOAD_REG_ADDR(r0,.opal_return);	\
+	LOAD_REG_ADDR(r0,opal_return);	\
 	mtlr	r0;			\
 	li	r0,MSR_DR|MSR_IR|MSR_LE;\
 	andc	r12,r12,r0;		\
@@ -44,7 +44,7 @@
 	mtspr	SPRN_HSRR0,r12;		\
 	hrfid
 
-_STATIC(opal_return)
+opal_return:
 	/*
 	 * Fixup endian on OPAL return... we should be able to simplify
 	 * this by instead converting the below trampoline to a set of

+ 3 - 2
arch/powerpc/platforms/powernv/smp.c

@@ -31,6 +31,7 @@
 #include <asm/xics.h>
 #include <asm/opal.h>
 #include <asm/runlatch.h>
+#include <asm/code-patching.h>
 
 #include "powernv.h"
 
@@ -50,8 +51,8 @@ static void pnv_smp_setup_cpu(int cpu)
 int pnv_smp_kick_cpu(int nr)
 {
 	unsigned int pcpu = get_hard_smp_processor_id(nr);
-	unsigned long start_here = __pa(*((unsigned long *)
-					  generic_secondary_smp_init));
+	unsigned long start_here =
+			__pa(ppc_function_entry(generic_secondary_smp_init));
 	long rc;
 
 	BUG_ON(nr < 0 || nr >= NR_CPUS);

+ 2 - 2
arch/powerpc/platforms/pseries/hvCall.S

@@ -49,7 +49,7 @@ END_FTR_SECTION(0, 1);						\
 	std	r0,16(r1);					\
 	addi	r4,r1,STK_PARAM(FIRST_REG);			\
 	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
-	bl	.__trace_hcall_entry;				\
+	bl	__trace_hcall_entry;				\
 	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
 	ld	r0,16(r1);					\
 	ld	r3,STK_PARAM(R3)(r1);				\
@@ -83,7 +83,7 @@ END_FTR_SECTION(0, 1);						\
 	mr	r3,r6;						\
 	std	r0,16(r1);					\
 	stdu	r1,-STACK_FRAME_OVERHEAD(r1);			\
-	bl	.__trace_hcall_exit;				\
+	bl	__trace_hcall_exit;				\
 	addi	r1,r1,STACK_FRAME_OVERHEAD;			\
 	ld	r0,16(r1);					\
 	ld	r3,STK_PARAM(R3)(r1);				\

+ 3 - 2
arch/powerpc/platforms/pseries/smp.c

@@ -44,6 +44,7 @@
 #include <asm/xics.h>
 #include <asm/dbell.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/code-patching.h>
 
 #include "pseries.h"
 #include "offline_states.h"
@@ -96,8 +97,8 @@ int smp_query_cpu_stopped(unsigned int pcpu)
 static inline int smp_startup_cpu(unsigned int lcpu)
 {
 	int status;
-	unsigned long start_here = __pa((u32)*((unsigned long *)
-					       generic_secondary_smp_init));
+	unsigned long start_here =
+			__pa(ppc_function_entry(generic_secondary_smp_init));
 	unsigned int pcpu;
 	int start_cpu;
 

+ 2 - 1
arch/powerpc/platforms/wsp/scom_smp.c

@@ -20,6 +20,7 @@
 #include <asm/reg_a2.h>
 #include <asm/scom.h>
 #include <asm/udbg.h>
+#include <asm/code-patching.h>
 
 #include "wsp.h"
 
@@ -405,7 +406,7 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
 			goto fail;
 	}
 
-	start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init
+	start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init
 					: generic_secondary_thread_init);
 	pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
 

+ 4 - 1
tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h

@@ -46,12 +46,15 @@
 #define R20 r20
 #define R21 r21
 #define R22 r22
+#define R29 r29
+#define R30 r30
+#define R31 r31
 
 #define STACKFRAMESIZE	256
-#define STK_PARAM(i)	(48 + ((i)-3)*8)
 #define STK_REG(i)	(112 + ((i)-14)*8)
 
 #define _GLOBAL(A) FUNC_START(test_ ## A)
+#define _GLOBAL_TOC(A) _GLOBAL(A)
 
 #define PPC_MTOCRF(A, B)	mtocrf A, B