
Merge branch 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild

Pull kbuild updates from Michal Marek:

 - EXPORT_SYMBOL for asm source by Al Viro.

   This does bring a regression, because genksyms no longer generates
   checksums for these symbols (CONFIG_MODVERSIONS). Nick Piggin is
   working on a patch to fix this.

   Plus, we are talking about functions like strcpy(), which rarely
   change prototypes.

 - Fixes for PPC fallout of the above by Stephen Rothwell and Nick
   Piggin

 - fixdep speedup by Alexey Dobriyan.

 - preparatory work by Nick Piggin to allow architectures to build with
   -ffunction-sections, -fdata-sections and --gc-sections

 - CONFIG_THIN_ARCHIVES support by Stephen Rothwell

 - fix for filenames with colons in the initramfs source by me.

* 'kbuild' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild: (22 commits)
  initramfs: Escape colons in depfile
  ppc: there is no clear_pages to export
  powerpc/64: whitelist unresolved modversions CRCs
  kbuild: -ffunction-sections fix for archs with conflicting sections
  kbuild: add arch specific post-link Makefile
  kbuild: allow archs to select link dead code/data elimination
  kbuild: allow architectures to use thin archives instead of ld -r
  kbuild: Regenerate genksyms lexer
  kbuild: genksyms fix for typeof handling
  fixdep: faster CONFIG_ search
  ia64: move exports to definitions
  sparc32: debride memcpy.S a bit
  [sparc] unify 32bit and 64bit string.h
  sparc: move exports to definitions
  ppc: move exports to definitions
  arm: move exports to definitions
  s390: move exports to definitions
  m68k: move exports to definitions
  alpha: move exports to actual definitions
  x86: move exports to actual definitions
  ...
Linus Torvalds, 8 years ago
Parent(s):
Commit: 84d69848c9
100 changed files, with 309 additions and 435 deletions:
  1. Documentation/kbuild/makefiles.txt (+16, -0)
  2. Makefile (+16, -3)
  3. arch/Kconfig (+21, -0)
  4. arch/alpha/include/asm/Kbuild (+1, -0)
  5. arch/alpha/kernel/Makefile (+1, -1)
  6. arch/alpha/kernel/alpha_ksyms.c (+0, -102)
  7. arch/alpha/kernel/machvec_impl.h (+4, -2)
  8. arch/alpha/kernel/setup.c (+1, -0)
  9. arch/alpha/lib/callback_srm.S (+5, -0)
 10. arch/alpha/lib/checksum.c (+3, -0)
 11. arch/alpha/lib/clear_page.S (+2, -1)
 12. arch/alpha/lib/clear_user.S (+2, -0)
 13. arch/alpha/lib/copy_page.S (+2, -1)
 14. arch/alpha/lib/copy_user.S (+3, -0)
 15. arch/alpha/lib/csum_ipv6_magic.S (+2, -0)
 16. arch/alpha/lib/csum_partial_copy.c (+2, -0)
 17. arch/alpha/lib/dec_and_lock.c (+2, -0)
 18. arch/alpha/lib/divide.S (+3, -0)
 19. arch/alpha/lib/ev6-clear_page.S (+2, -1)
 20. arch/alpha/lib/ev6-clear_user.S (+2, -1)
 21. arch/alpha/lib/ev6-copy_page.S (+2, -1)
 22. arch/alpha/lib/ev6-copy_user.S (+2, -1)
 23. arch/alpha/lib/ev6-csum_ipv6_magic.S (+2, -0)
 24. arch/alpha/lib/ev6-divide.S (+3, -0)
 25. arch/alpha/lib/ev6-memchr.S (+2, -1)
 26. arch/alpha/lib/ev6-memcpy.S (+2, -1)
 27. arch/alpha/lib/ev6-memset.S (+6, -1)
 28. arch/alpha/lib/ev67-strcat.S (+2, -1)
 29. arch/alpha/lib/ev67-strchr.S (+2, -1)
 30. arch/alpha/lib/ev67-strlen.S (+2, -1)
 31. arch/alpha/lib/ev67-strncat.S (+2, -1)
 32. arch/alpha/lib/ev67-strrchr.S (+2, -1)
 33. arch/alpha/lib/fpreg.c (+7, -0)
 34. arch/alpha/lib/memchr.S (+2, -1)
 35. arch/alpha/lib/memcpy.c (+2, -3)
 36. arch/alpha/lib/memmove.S (+2, -1)
 37. arch/alpha/lib/memset.S (+6, -1)
 38. arch/alpha/lib/strcat.S (+2, -0)
 39. arch/alpha/lib/strchr.S (+2, -1)
 40. arch/alpha/lib/strcpy.S (+2, -1)
 41. arch/alpha/lib/strlen.S (+2, -1)
 42. arch/alpha/lib/strncat.S (+2, -1)
 43. arch/alpha/lib/strncpy.S (+2, -1)
 44. arch/alpha/lib/strrchr.S (+2, -1)
 45. arch/arm/include/asm/Kbuild (+1, -0)
 46. arch/arm/kernel/Makefile (+1, -1)
 47. arch/arm/kernel/armksyms.c (+0, -183)
 48. arch/arm/kernel/entry-ftrace.S (+3, -0)
 49. arch/arm/kernel/head.S (+3, -0)
 50. arch/arm/kernel/smccc-call.S (+3, -0)
 51. arch/arm/lib/ashldi3.S (+3, -0)
 52. arch/arm/lib/ashrdi3.S (+3, -0)
 53. arch/arm/lib/bitops.h (+5, -0)
 54. arch/arm/lib/bswapsdi2.S (+3, -0)
 55. arch/arm/lib/clear_user.S (+4, -0)
 56. arch/arm/lib/copy_from_user.S (+2, -0)
 57. arch/arm/lib/copy_page.S (+2, -0)
 58. arch/arm/lib/copy_to_user.S (+4, -0)
 59. arch/arm/lib/csumipv6.S (+2, -1)
 60. arch/arm/lib/csumpartial.S (+2, -0)
 61. arch/arm/lib/csumpartialcopy.S (+1, -0)
 62. arch/arm/lib/csumpartialcopygeneric.S (+2, -0)
 63. arch/arm/lib/csumpartialcopyuser.S (+1, -0)
 64. arch/arm/lib/delay.c (+2, -0)
 65. arch/arm/lib/div64.S (+2, -0)
 66. arch/arm/lib/findbit.S (+9, -0)
 67. arch/arm/lib/getuser.S (+9, -0)
 68. arch/arm/lib/io-readsb.S (+2, -0)
 69. arch/arm/lib/io-readsl.S (+2, -0)
 70. arch/arm/lib/io-readsw-armv3.S (+2, -1)
 71. arch/arm/lib/io-readsw-armv4.S (+2, -0)
 72. arch/arm/lib/io-writesb.S (+2, -0)
 73. arch/arm/lib/io-writesl.S (+2, -0)
 74. arch/arm/lib/io-writesw-armv3.S (+2, -0)
 75. arch/arm/lib/io-writesw-armv4.S (+2, -0)
 76. arch/arm/lib/lib1funcs.S (+9, -0)
 77. arch/arm/lib/lshrdi3.S (+3, -0)
 78. arch/arm/lib/memchr.S (+2, -0)
 79. arch/arm/lib/memcpy.S (+3, -0)
 80. arch/arm/lib/memmove.S (+2, -0)
 81. arch/arm/lib/memset.S (+3, -0)
 82. arch/arm/lib/memzero.S (+2, -0)
 83. arch/arm/lib/muldi3.S (+3, -0)
 84. arch/arm/lib/putuser.S (+5, -0)
 85. arch/arm/lib/strchr.S (+2, -0)
 86. arch/arm/lib/strrchr.S (+2, -0)
 87. arch/arm/lib/uaccess_with_memcpy.c (+3, -0)
 88. arch/arm/lib/ucmpdi2.S (+3, -0)
 89. arch/arm/mach-imx/Makefile (+0, -1)
 90. arch/arm/mach-imx/ssi-fiq-ksym.c (+0, -20)
 91. arch/arm/mach-imx/ssi-fiq.S (+6, -1)
 92. arch/ia64/hp/sim/boot/Makefile (+1, -1)
 93. arch/ia64/include/asm/export.h (+3, -0)
 94. arch/ia64/kernel/entry.S (+3, -0)
 95. arch/ia64/kernel/esi_stub.S (+2, -0)
 96. arch/ia64/kernel/head.S (+2, -0)
 97. arch/ia64/kernel/ia64_ksyms.c (+2, -92)
 98. arch/ia64/kernel/ivt.S (+2, -0)
 99. arch/ia64/kernel/pal.S (+7, -0)
100. arch/ia64/kernel/setup.c (+4, -0)

+ 16 - 0
Documentation/kbuild/makefiles.txt

@@ -41,6 +41,7 @@ This document describes the Linux kernel Makefiles.
 	   --- 6.8 Custom kbuild commands
 	   --- 6.9 Preprocessing linker scripts
 	   --- 6.10 Generic header files
+	   --- 6.11 Post-link pass
 
 	=== 7 Kbuild syntax for exported headers
 		--- 7.1 header-y
@@ -1237,6 +1238,21 @@ When kbuild executes, the following steps are followed (roughly):
 	to list the file in the Kbuild file.
 	See "7.4 generic-y" for further info on syntax etc.
 
+--- 6.11 Post-link pass
+
+	If the file arch/xxx/Makefile.postlink exists, this makefile
+	will be invoked for post-link objects (vmlinux and modules.ko)
+	for architectures to run post-link passes on. Must also handle
+	the clean target.
+
+	This pass runs after kallsyms generation. If the architecture
+	needs to modify symbol locations, rather than manipulate the
+	kallsyms, it may be easier to add another postlink target for
+	.tmp_vmlinux? targets to be called from link-vmlinux.sh.
+
+	For example, powerpc uses this to check relocation sanity of
+	the linked vmlinux file.
+
 === 7 Kbuild syntax for exported headers
 
 The kernel includes a set of headers that is exported to userspace.
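
As a rough illustration of the post-link pass documented in section 6.11 above, a minimal arch/xxx/Makefile.postlink might look like the sketch below. This is a hedged example rather than a file from this series: the relocation-check step, its script path, and the CHKREL tag are placeholders for whatever post-link work an architecture actually needs (powerpc, for instance, uses its pass to sanity-check relocations in the linked vmlinux).

# Hypothetical arch/xxx/Makefile.postlink -- illustrative sketch only.
# Invoked by the top-level Makefile after vmlinux is linked (and for
# modules.ko), and once more with the "clean" target.

PHONY := __archpost
__archpost:

-include include/config/auto.conf
include scripts/Kbuild.include

# Placeholder post-link step; the script name is invented for this example.
quiet_cmd_check_relocs = CHKREL  $@
      cmd_check_relocs = $(CONFIG_SHELL) $(srctree)/arch/xxx/tools/check-relocs.sh "$@"

vmlinux: FORCE
	$(call cmd,check_relocs)

# Modules and clean have nothing to do in this sketch, but the targets
# must exist because the top-level Makefile invokes them.
%.ko: FORCE
	@true

clean:
	@true

PHONY += FORCE clean
FORCE:

.PHONY: $(PHONY)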

+ 16 - 3
Makefile

@@ -623,6 +623,11 @@ KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,maybe-uninitialized,)
 KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS	+= $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS	+= $(call cc-option,-fdata-sections,)
+endif
+
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS	+= -Os
 else
@@ -803,6 +808,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
 KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
 LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
 
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+LDFLAGS_vmlinux	+= $(call ld-option, --gc-sections,)
+endif
+
 ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
 LDFLAGS_vmlinux	+= $(call ld-option, -X,)
 endif
@@ -942,9 +951,12 @@ endif
 include/generated/autoksyms.h: FORCE
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
 
-# Final link of vmlinux
-      cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
-quiet_cmd_link-vmlinux = LINK    $@
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Final link of vmlinux with optional arch pass after final link
+    cmd_link-vmlinux =                                                 \
+	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ;       \
+	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
 
 vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
 	+$(call if_changed,link-vmlinux)
@@ -1271,6 +1283,7 @@ $(clean-dirs):
 
 vmlinuxclean:
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
+	$(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
 
 clean: archclean vmlinuxclean
 

+ 21 - 0
arch/Kconfig

@@ -450,6 +450,27 @@ config CC_STACKPROTECTOR_STRONG
 
 endchoice
 
+config THIN_ARCHIVES
+	bool
+	help
+	  Select this if the architecture wants to use thin archives
+	  instead of ld -r to create the built-in.o files.
+
+config LD_DEAD_CODE_DATA_ELIMINATION
+	bool
+	help
+	  Select this if the architecture wants to do dead code and
+	  data elimination with the linker by compiling with
+	  -ffunction-sections -fdata-sections and linking with
+	  --gc-sections.
+
+	  This requires that the arch annotates or otherwise protects
+	  its external entry points from being discarded. Linker scripts
+	  must also merge .text.*, .data.*, and .bss.* correctly into
+	  output sections. Care must be taken not to pull in unrelated
+	  sections (e.g., '.text.init'). Typically '.' in section names
+	  is used to distinguish them from label names / C identifiers.
+
 config HAVE_ARCH_WITHIN_STACK_FRAMES
 	bool
 	help
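
To make the THIN_ARCHIVES help text above concrete, the fragment below sketches the two ways a directory's built-in.o can be produced. It is a simplified, hedged illustration of the idea, not the exact rule added to scripts/Makefile.build in this series; the variable name and flag spelling are approximations.

# Simplified sketch of the built-in.o rule (not the exact kbuild code).
ifdef CONFIG_THIN_ARCHIVES
  # GNU ar's "T" flag builds a thin archive: it records the paths of the
  # member objects instead of copying their contents, so the final link
  # reads the original .o files directly.
  cmd_link_o_target = rm -f $@; $(AR) rcSTP $@ $(filter $(obj-y), $^)
else
  # Traditional incremental link: merge the objects into one large
  # relocatable object with ld -r.
  cmd_link_o_target = $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^)
endif

The companion LD_DEAD_CODE_DATA_ELIMINATION option is wired up in the top-level Makefile hunks shown earlier: -ffunction-sections and -fdata-sections at compile time, --gc-sections on the vmlinux link, with the architecture's linker script and entry-point annotations (as described in the help text) keeping live sections from being discarded.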

+ 1 - 0
arch/alpha/include/asm/Kbuild

@@ -3,6 +3,7 @@
 generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h

+ 1 - 1
arch/alpha/kernel/Makefile

@@ -8,7 +8,7 @@ ccflags-y	:= -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o osf_sys.o irq.o \
 	    irq_alpha.o signal.o setup.o ptrace.o time.o \
-	    alpha_ksyms.o systbls.o err_common.o io.o
+	    systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
 obj-$(CONFIG_SMP)	+= smp.o

+ 0 - 102
arch/alpha/kernel/alpha_ksyms.c

@@ -1,102 +0,0 @@
-/*
- * linux/arch/alpha/kernel/alpha_ksyms.c
- *
- * Export the alpha-specific functions that are needed for loadable
- * modules.
- */
-
-#include <linux/module.h>
-#include <asm/console.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpu.h>
-#include <asm/machvec.h>
-
-#include <linux/syscalls.h>
-
-/* these are C runtime functions with special calling conventions: */
-extern void __divl (void);
-extern void __reml (void);
-extern void __divq (void);
-extern void __remq (void);
-extern void __divlu (void);
-extern void __remlu (void);
-extern void __divqu (void);
-extern void __remqu (void);
-
-EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(callback_getenv);
-EXPORT_SYMBOL(callback_setenv);
-EXPORT_SYMBOL(callback_save_env);
-
-/* platform dependent support */
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(___memset);
-EXPORT_SYMBOL(__memsetw);
-EXPORT_SYMBOL(__constant_c_memset);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(alpha_read_fp_reg);
-EXPORT_SYMBOL(alpha_read_fp_reg_s);
-EXPORT_SYMBOL(alpha_write_fp_reg);
-EXPORT_SYMBOL(alpha_write_fp_reg_s);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_tcpudp_magic);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#ifdef CONFIG_MATHEMU_MODULE
-extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
-extern long (*alpha_fp_emul) (unsigned long pc);
-EXPORT_SYMBOL(alpha_fp_emul_imprecise);
-EXPORT_SYMBOL(alpha_fp_emul);
-#endif
-
-/*
- * The following are specially called from the uaccess assembly stubs.
- */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-
-/* 
- * SMP-specific symbols.
- */
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* CONFIG_SMP */
-
-/*
- * The following are special because they're not called
- * explicitly (the C compiler or assembler generates them in
- * response to division operations).  Fortunately, their
- * interface isn't gonna change any time soon now, so it's OK
- * to leave it out of version control.
- */
-# undef memcpy
-# undef memset
-EXPORT_SYMBOL(__divl);
-EXPORT_SYMBOL(__divlu);
-EXPORT_SYMBOL(__divq);
-EXPORT_SYMBOL(__divqu);
-EXPORT_SYMBOL(__reml);
-EXPORT_SYMBOL(__remlu);
-EXPORT_SYMBOL(__remq);
-EXPORT_SYMBOL(__remqu);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);

+ 4 - 2
arch/alpha/kernel/machvec_impl.h

@@ -144,9 +144,11 @@
    else beforehand.  Fine.  We'll do it ourselves.  */
 #if 0
 #define ALIAS_MV(system) \
-  struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+  struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \
+  EXPORT_SYMBOL(alpha_mv);
 #else
 #define ALIAS_MV(system) \
-  asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+  asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \
+  EXPORT_SYMBOL(alpha_mv);
 #endif
 #endif /* GENERIC */

+ 1 - 0
arch/alpha/kernel/setup.c

@@ -115,6 +115,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
 
 #ifdef CONFIG_ALPHA_GENERIC
 struct alpha_machine_vector alpha_mv;
+EXPORT_SYMBOL(alpha_mv);
 #endif
 
 #ifndef alpha_using_srm

+ 5 - 0
arch/alpha/lib/callback_srm.S

@@ -3,6 +3,7 @@
  */
 
 #include <asm/console.h>
+#include <asm/export.h>
 
 .text
 #define HWRPB_CRB_OFFSET 0xc0
@@ -92,6 +93,10 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4)
 CALLBACK(save_env, CCB_SAVE_ENV, 1)
 CALLBACK(pswitch, CCB_PSWITCH, 3)
 CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
+
+EXPORT_SYMBOL(callback_getenv)
+EXPORT_SYMBOL(callback_setenv)
+EXPORT_SYMBOL(callback_save_env)
 	
 .data
 __alpha_using_srm:		# For use by bootpheader

+ 3 - 0
arch/alpha/lib/checksum.c

@@ -48,6 +48,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 		(__force u64)saddr + (__force u64)daddr +
 		(__force u64)sum + ((len + proto) << 8));
 }
+EXPORT_SYMBOL(csum_tcpudp_magic);
 
 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 			  __u32 len, __u8 proto, __wsum sum)
@@ -144,6 +145,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return (__force __sum16)~do_csum(iph,ihl*4);
 }
+EXPORT_SYMBOL(ip_fast_csum);
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -178,3 +180,4 @@ __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return (__force __sum16)~from64to16(do_csum(buff,len));
 }
+EXPORT_SYMBOL(ip_compute_csum);

+ 2 - 1
arch/alpha/lib/clear_page.S

@@ -3,7 +3,7 @@
  *
  * Zero an entire page.
  */
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global clear_page
@@ -37,3 +37,4 @@ clear_page:
 	nop
 
 	.end clear_page
+	EXPORT_SYMBOL(clear_page)

+ 2 - 0
arch/alpha/lib/clear_user.S

@@ -24,6 +24,7 @@
  * Clobbers:
  *	$1,$2,$3,$4,$5,$6
  */
+#include <asm/export.h>
 
 /* Allow an exception for an insn; exit if we get one.  */
 #define EX(x,y...)			\
@@ -111,3 +112,4 @@ $exception:
 	ret	$31, ($28), 1	# .. e1 :
 
 	.end __do_clear_user
+	EXPORT_SYMBOL(__do_clear_user)

+ 2 - 1
arch/alpha/lib/copy_page.S

@@ -3,7 +3,7 @@
  *
  * Copy an entire page.
  */
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global copy_page
@@ -47,3 +47,4 @@ copy_page:
 	nop
 
 	.end copy_page
+	EXPORT_SYMBOL(copy_page)

+ 3 - 0
arch/alpha/lib/copy_user.S

@@ -26,6 +26,8 @@
  *	$1,$2,$3,$4,$5,$6,$7
  */
 
+#include <asm/export.h>
+
 /* Allow an exception for an insn; exit if we get one.  */
 #define EXI(x,y...)			\
 	99: x,##y;			\
@@ -143,3 +145,4 @@ $101:
 	ret $31,($28),1
 
 	.end __copy_user
+EXPORT_SYMBOL(__copy_user)

+ 2 - 0
arch/alpha/lib/csum_ipv6_magic.S

@@ -12,6 +12,7 @@
  * added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  */
 
+#include <asm/export.h>
 	.globl csum_ipv6_magic
 	.align 4
 	.ent csum_ipv6_magic
@@ -113,3 +114,4 @@ csum_ipv6_magic:
 	ret			# .. e1 :
 
 	.end csum_ipv6_magic
+	EXPORT_SYMBOL(csum_ipv6_magic)

+ 2 - 0
arch/alpha/lib/csum_partial_copy.c

@@ -374,6 +374,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 	}
 	return (__force __wsum)checksum;
 }
+EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -386,3 +387,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 	set_fs(oldfs);
 	return checksum;
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);

+ 2 - 0
arch/alpha/lib/dec_and_lock.c

@@ -7,6 +7,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
+#include <linux/export.h>
 
   asm (".text					\n\
 	.global _atomic_dec_and_lock		\n\
@@ -39,3 +40,4 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
 	spin_unlock(lock);
 	return 0;
 }
+EXPORT_SYMBOL(_atomic_dec_and_lock);

+ 3 - 0
arch/alpha/lib/divide.S

@@ -45,6 +45,7 @@
  *	$28 - compare status
  */
 
+#include <asm/export.h>
 #define halt .long 0
 
 /*
@@ -151,6 +152,7 @@ ufunction:
 	addq	$30,STACK,$30
 	ret	$31,($23),1
 	.end	ufunction
+EXPORT_SYMBOL(ufunction)
 
 /*
  * Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -193,3 +195,4 @@ sfunction:
 	addq	$30,STACK,$30
 	ret	$31,($23),1
 	.end	sfunction
+EXPORT_SYMBOL(sfunction)

+ 2 - 1
arch/alpha/lib/ev6-clear_page.S

@@ -3,7 +3,7 @@
  *
  * Zero an entire page.
  */
-
+#include <asm/export.h>
         .text
         .align 4
         .global clear_page
@@ -52,3 +52,4 @@ clear_page:
 	nop
 
 	.end clear_page
+	EXPORT_SYMBOL(clear_page)

+ 2 - 1
arch/alpha/lib/ev6-clear_user.S

@@ -43,6 +43,7 @@
  *	want to leave a hole (and we also want to avoid repeating lots of work)
  */
 
+#include <asm/export.h>
 /* Allow an exception for an insn; exit if we get one.  */
 #define EX(x,y...)			\
 	99: x,##y;			\
@@ -222,4 +223,4 @@ $exception:			# Destination for exception recovery(?)
 	nop			# .. E  .. ..	:
 	ret	$31, ($28), 1	# L0 .. .. ..	: L U L U
 	.end __do_clear_user
-
+	EXPORT_SYMBOL(__do_clear_user)

+ 2 - 1
arch/alpha/lib/ev6-copy_page.S

@@ -56,7 +56,7 @@
    destination pages are in the dcache, but it is my guess that this is
    less important than the dcache miss case.  */
 
-
+#include <asm/export.h>
 	.text
 	.align 4
 	.global copy_page
@@ -201,3 +201,4 @@ copy_page:
 	nop
 
 	.end copy_page
+	EXPORT_SYMBOL(copy_page)

+ 2 - 1
arch/alpha/lib/ev6-copy_user.S

@@ -37,6 +37,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
 
+#include <asm/export.h>
 /* Allow an exception for an insn; exit if we get one.  */
 #define EXI(x,y...)			\
 	99: x,##y;			\
@@ -256,4 +257,4 @@ $101:
 	ret $31,($28),1		# L0
 
 	.end __copy_user
-
+	EXPORT_SYMBOL(__copy_user)

+ 2 - 0
arch/alpha/lib/ev6-csum_ipv6_magic.S

@@ -52,6 +52,7 @@
  * may cause additional delay in rare cases (load-load replay traps).
  */
 
+#include <asm/export.h>
 	.globl csum_ipv6_magic
 	.align 4
 	.ent csum_ipv6_magic
@@ -148,3 +149,4 @@ csum_ipv6_magic:
 	ret			# L0 : L U L U
 
 	.end csum_ipv6_magic
+	EXPORT_SYMBOL(csum_ipv6_magic)

+ 3 - 0
arch/alpha/lib/ev6-divide.S

@@ -55,6 +55,7 @@
  * Try not to change the actual algorithm if possible for consistency.
  */
 
+#include <asm/export.h>
 #define halt .long 0
 
 /*
@@ -205,6 +206,7 @@ ufunction:
 	addq	$30,STACK,$30		# E :
 	ret	$31,($23),1		# L0 : L U U L
 	.end	ufunction
+EXPORT_SYMBOL(ufunction)
 
 /*
  * Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -257,3 +259,4 @@ sfunction:
 	addq	$30,STACK,$30		# E :
 	ret	$31,($23),1		# L0 : L U U L
 	.end	sfunction
+EXPORT_SYMBOL(sfunction)

+ 2 - 1
arch/alpha/lib/ev6-memchr.S

@@ -27,7 +27,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  * Try not to change the actual algorithm if possible for consistency.
  */
-
+#include <asm/export.h>
         .set noreorder
         .set noat
 
@@ -189,3 +189,4 @@ $not_found:
 	ret			# L0 :
 
         .end memchr
+	EXPORT_SYMBOL(memchr)

+ 2 - 1
arch/alpha/lib/ev6-memcpy.S

@@ -19,7 +19,7 @@
  * Temp usage notes:
  *	$1,$2,		- scratch
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -242,6 +242,7 @@ $nomoredata:
 	nop				# E :
 
 	.end memcpy
+	EXPORT_SYMBOL(memcpy)
 
 /* For backwards module compatibility.  */
 __memcpy = memcpy

+ 6 - 1
arch/alpha/lib/ev6-memset.S

@@ -26,7 +26,7 @@
  * as fixes will need to be made in multiple places.  The performance gain
  * is worth it.
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 .text
@@ -229,6 +229,7 @@ end_b:
 	nop
 	ret $31,($26),1		# L0 :
 	.end ___memset
+	EXPORT_SYMBOL(___memset)
 
 	/*
 	 * This is the original body of code, prior to replication and
@@ -406,6 +407,7 @@ end:
 	nop
 	ret $31,($26),1		# L0 :
 	.end __constant_c_memset
+	EXPORT_SYMBOL(__constant_c_memset)
 
 	/*
 	 * This is a replicant of the __constant_c_memset code, rescheduled
@@ -594,6 +596,9 @@ end_w:
 	ret $31,($26),1		# L0 :
 
 	.end __memsetw
+	EXPORT_SYMBOL(__memsetw)
 
 memset = ___memset
 __memset = ___memset
+	EXPORT_SYMBOL(memset)
+	EXPORT_SYMBOL(__memset)

+ 2 - 1
arch/alpha/lib/ev67-strcat.S

@@ -19,7 +19,7 @@
  * string once.
  */
 
-
+#include <asm/export.h>
 	.text
 
 	.align 4
@@ -52,3 +52,4 @@ $found:	cttz	$2, $3		# U0 :
 	br	__stxcpy	# L0 :
 
 	.end strcat
+	EXPORT_SYMBOL(strcat)

+ 2 - 1
arch/alpha/lib/ev67-strchr.S

@@ -15,7 +15,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  * Try not to change the actual algorithm if possible for consistency.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -86,3 +86,4 @@ $found:	negq    t0, t1		# E : clear all but least set bit
 	ret			# L0 :
 
 	.end strchr
+	EXPORT_SYMBOL(strchr)

+ 2 - 1
arch/alpha/lib/ev67-strlen.S

@@ -17,7 +17,7 @@
  *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -47,3 +47,4 @@ $found:
 	ret	$31, ($26)	# L0 :
 
 	.end	strlen
+	EXPORT_SYMBOL(strlen)

+ 2 - 1
arch/alpha/lib/ev67-strncat.S

@@ -20,7 +20,7 @@
  * Try not to change the actual algorithm if possible for consistency.
  */
 
-
+#include <asm/export.h>
 	.text
 
 	.align 4
@@ -92,3 +92,4 @@ $zerocount:
 	ret			# L0 :
 
 	.end strncat
+	EXPORT_SYMBOL(strncat)

+ 2 - 1
arch/alpha/lib/ev67-strrchr.S

@@ -18,7 +18,7 @@
  *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
  */
 
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -107,3 +107,4 @@ $eos:
 	nop
 
 	.end strrchr
+	EXPORT_SYMBOL(strrchr)

+ 7 - 0
arch/alpha/lib/fpreg.c

@@ -4,6 +4,9 @@
  * (C) Copyright 1998 Linus Torvalds
  */
 
+#include <linux/compiler.h>
+#include <linux/export.h>
+
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define STT(reg,val)  asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
 #else
@@ -52,6 +55,7 @@ alpha_read_fp_reg (unsigned long reg)
 	}
 	return val;
 }
+EXPORT_SYMBOL(alpha_read_fp_reg);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define LDT(reg,val)  asm volatile ("itoft %0,$f"#reg : : "r"(val));
@@ -97,6 +101,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
 	      case 31: LDT(31, val); break;
 	}
 }
+EXPORT_SYMBOL(alpha_write_fp_reg);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define STS(reg,val)  asm volatile ("ftois $f"#reg",%0" : "=r"(val));
@@ -146,6 +151,7 @@ alpha_read_fp_reg_s (unsigned long reg)
 	}
 	return val;
 }
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
 
 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define LDS(reg,val)  asm volatile ("itofs %0,$f"#reg : : "r"(val));
@@ -191,3 +197,4 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val)
 	      case 31: LDS(31, val); break;
 	}
 }
+EXPORT_SYMBOL(alpha_write_fp_reg_s);

+ 2 - 1
arch/alpha/lib/memchr.S

@@ -31,7 +31,7 @@ For correctness consider that:
       - only minimum number of quadwords may be accessed
       - the third argument is an unsigned long
 */
-
+#include <asm/export.h>
         .set noreorder
         .set noat
 
@@ -162,3 +162,4 @@ $not_found:
 	ret			# .. e1 :
 
         .end memchr
+	EXPORT_SYMBOL(memchr)

+ 2 - 3
arch/alpha/lib/memcpy.c

@@ -16,6 +16,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/export.h>
 
 /*
  * This should be done in one go with ldq_u*2/mask/stq_u. Do it
@@ -158,6 +159,4 @@ void * memcpy(void * dest, const void *src, size_t n)
 	__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
 	return dest;
 }
-
-/* For backward modules compatibility, define __memcpy.  */
-asm("__memcpy = memcpy; .globl __memcpy");
+EXPORT_SYMBOL(memcpy);

+ 2 - 1
arch/alpha/lib/memmove.S

@@ -6,7 +6,7 @@
  * This is hand-massaged output from the original memcpy.c.  We defer to
  * memcpy whenever possible; the backwards copy loops are not unrolled.
  */
-        
+#include <asm/export.h>        
 	.set noat
 	.set noreorder
 	.text
@@ -179,3 +179,4 @@ $egress:
 	nop
 
 	.end memmove
+	EXPORT_SYMBOL(memmove)

+ 6 - 1
arch/alpha/lib/memset.S

@@ -13,7 +13,7 @@
  * The scheduling comments are according to the EV5 documentation (and done by 
  * hand, so they might well be incorrect, please do tell me about it..)
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 .text
@@ -106,6 +106,8 @@ within_one_quad:
 end:
 	ret $31,($26),1		/* E1 */
 	.end ___memset
+EXPORT_SYMBOL(___memset)
+EXPORT_SYMBOL(__constant_c_memset)
 
 	.align 5
 	.ent __memsetw
@@ -122,6 +124,9 @@ __memsetw:
 	br __constant_c_memset	/* .. E1 */
 
 	.end __memsetw
+EXPORT_SYMBOL(__memsetw)
 
 memset = ___memset
 __memset = ___memset
+	EXPORT_SYMBOL(memset)
+	EXPORT_SYMBOL(__memset)

+ 2 - 0
arch/alpha/lib/strcat.S

@@ -4,6 +4,7 @@
  *
  * Append a null-terminated string from SRC to DST.
  */
+#include <asm/export.h>
 
 	.text
 
@@ -50,3 +51,4 @@ $found:	negq    $2, $3		# clear all but least set bit
 	br	__stxcpy
 
 	.end strcat
+EXPORT_SYMBOL(strcat);

+ 2 - 1
arch/alpha/lib/strchr.S

@@ -5,7 +5,7 @@
  * Return the address of a given character within a null-terminated
  * string, or null if it is not found.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -68,3 +68,4 @@ $retnull:
 	ret			# .. e1 :
 
 	.end strchr
+	EXPORT_SYMBOL(strchr)

+ 2 - 1
arch/alpha/lib/strcpy.S

@@ -5,7 +5,7 @@
  * Copy a null-terminated string from SRC to DST.  Return a pointer
  * to the null-terminator in the source.
  */
-
+#include <asm/export.h>
 	.text
 
 	.align 3
@@ -21,3 +21,4 @@ strcpy:
 	br	__stxcpy	# do the copy
 
 	.end strcpy
+	EXPORT_SYMBOL(strcpy)

+ 2 - 1
arch/alpha/lib/strlen.S

@@ -11,7 +11,7 @@
  *	  do this instead of the 9 instructions that
  *	  binary search needs).
  */
-
+#include <asm/export.h>
 	.set noreorder
 	.set noat
 
@@ -55,3 +55,4 @@ done:	subq	$0, $16, $0
 	ret	$31, ($26)
 
 	.end	strlen
+	EXPORT_SYMBOL(strlen)

+ 2 - 1
arch/alpha/lib/strncat.S

@@ -9,7 +9,7 @@
  * past count, whereas libc may write to count+1.  This follows the generic
  * implementation in lib/string.c and is, IMHO, more sensible.
  */
-
+#include <asm/export.h>
 	.text
 
 	.align 3
@@ -82,3 +82,4 @@ $zerocount:
 	ret
 
 	.end strncat
+	EXPORT_SYMBOL(strncat)

+ 2 - 1
arch/alpha/lib/strncpy.S

@@ -10,7 +10,7 @@
  * version has cropped that bit o' nastiness as well as assuming that
  * __stxncpy is in range of a branch.
  */
-
+#include <asm/export.h>
 	.set noat
 	.set noreorder
 
@@ -79,3 +79,4 @@ $zerolen:
 	ret
 
 	.end	strncpy
+	EXPORT_SYMBOL(strncpy)

+ 2 - 1
arch/alpha/lib/strrchr.S

@@ -5,7 +5,7 @@
  * Return the address of the last occurrence of a given character
  * within a null-terminated string, or null if it is not found.
  */
-
+#include <asm/export.h>
 #include <asm/regdef.h>
 
 	.set noreorder
@@ -85,3 +85,4 @@ $retnull:
 	ret			# .. e1 :
 
 	.end strrchr
+	EXPORT_SYMBOL(strrchr)

+ 1 - 0
arch/arm/include/asm/Kbuild

@@ -8,6 +8,7 @@ generic-y += early_ioremap.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += export.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h

+ 1 - 1
arch/arm/kernel/Makefile

@@ -33,7 +33,7 @@ endif
 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
-obj-$(CONFIG_MODULES)		+= armksyms.o module.o
+obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_ARM_MODULE_PLTS)	+= module-plts.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o

+ 0 - 183
arch/arm/kernel/armksyms.c

@@ -1,183 +0,0 @@
-/*
- *  linux/arch/arm/kernel/armksyms.c
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/cryptohash.h>
-#include <linux/delay.h>
-#include <linux/in6.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/arm-smccc.h>
-
-#include <asm/checksum.h>
-#include <asm/ftrace.h>
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler...  (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __ucmpdi2(void);
-extern void __udivsi3(void);
-extern void __umodsi3(void);
-extern void __do_div64(void);
-extern void __bswapsi2(void);
-extern void __bswapdi2(void);
-
-extern void __aeabi_idiv(void);
-extern void __aeabi_idivmod(void);
-extern void __aeabi_lasr(void);
-extern void __aeabi_llsl(void);
-extern void __aeabi_llsr(void);
-extern void __aeabi_lmul(void);
-extern void __aeabi_uidiv(void);
-extern void __aeabi_uidivmod(void);
-extern void __aeabi_ulcmp(void);
-
-extern void fpundefinstr(void);
-
-void mmioset(void *, unsigned int, size_t);
-void mmiocpy(void *, const void *, size_t);
-
-	/* platform dependent support */
-EXPORT_SYMBOL(arm_delay_ops);
-
-	/* networking */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_ipv6_magic);
-
-	/* io */
-#ifndef __raw_readsb
-EXPORT_SYMBOL(__raw_readsb);
-#endif
-#ifndef __raw_readsw
-EXPORT_SYMBOL(__raw_readsw);
-#endif
-#ifndef __raw_readsl
-EXPORT_SYMBOL(__raw_readsl);
-#endif
-#ifndef __raw_writesb
-EXPORT_SYMBOL(__raw_writesb);
-#endif
-#ifndef __raw_writesw
-EXPORT_SYMBOL(__raw_writesw);
-#endif
-#ifndef __raw_writesl
-EXPORT_SYMBOL(__raw_writesl);
-#endif
-
-	/* string / mem functions */
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);
-
-EXPORT_SYMBOL(mmioset);
-EXPORT_SYMBOL(mmiocpy);
-
-#ifdef CONFIG_MMU
-EXPORT_SYMBOL(copy_page);
-
-EXPORT_SYMBOL(arm_copy_from_user);
-EXPORT_SYMBOL(arm_copy_to_user);
-EXPORT_SYMBOL(arm_clear_user);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(__get_user_64t_1);
-EXPORT_SYMBOL(__get_user_64t_2);
-EXPORT_SYMBOL(__get_user_64t_4);
-EXPORT_SYMBOL(__get_user_32t_8);
-#endif
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-#endif
-
-	/* gcc lib functions */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__do_div64);
-EXPORT_SYMBOL(__bswapsi2);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_AEABI
-EXPORT_SYMBOL(__aeabi_idiv);
-EXPORT_SYMBOL(__aeabi_idivmod);
-EXPORT_SYMBOL(__aeabi_lasr);
-EXPORT_SYMBOL(__aeabi_llsl);
-EXPORT_SYMBOL(__aeabi_llsr);
-EXPORT_SYMBOL(__aeabi_lmul);
-EXPORT_SYMBOL(__aeabi_uidiv);
-EXPORT_SYMBOL(__aeabi_uidivmod);
-EXPORT_SYMBOL(__aeabi_ulcmp);
-#endif
-
-	/* bitops */
-EXPORT_SYMBOL(_set_bit);
-EXPORT_SYMBOL(_test_and_set_bit);
-EXPORT_SYMBOL(_clear_bit);
-EXPORT_SYMBOL(_test_and_clear_bit);
-EXPORT_SYMBOL(_change_bit);
-EXPORT_SYMBOL(_test_and_change_bit);
-EXPORT_SYMBOL(_find_first_zero_bit_le);
-EXPORT_SYMBOL(_find_next_zero_bit_le);
-EXPORT_SYMBOL(_find_first_bit_le);
-EXPORT_SYMBOL(_find_next_bit_le);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(_find_first_zero_bit_be);
-EXPORT_SYMBOL(_find_next_zero_bit_be);
-EXPORT_SYMBOL(_find_first_bit_be);
-EXPORT_SYMBOL(_find_next_bit_be);
-#endif
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
-EXPORT_SYMBOL(__gnu_mcount_nc);
-#endif
-
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-EXPORT_SYMBOL(__pv_phys_pfn_offset);
-EXPORT_SYMBOL(__pv_offset);
-#endif
-
-#ifdef CONFIG_HAVE_ARM_SMCCC
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
-#endif

+ 3 - 0
arch/arm/kernel/entry-ftrace.S

@@ -7,6 +7,7 @@
 #include <asm/assembler.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #include "entry-header.S"
 
@@ -153,6 +154,7 @@ ENTRY(mcount)
 	__mcount _old
 #endif
 ENDPROC(mcount)
+EXPORT_SYMBOL(mcount)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
@@ -205,6 +207,7 @@ UNWIND(.fnstart)
 #endif
 UNWIND(.fnend)
 ENDPROC(__gnu_mcount_nc)
+EXPORT_SYMBOL(__gnu_mcount_nc)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller)

+ 3 - 0
arch/arm/kernel/head.S

@@ -22,6 +22,7 @@
 #include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/pgtable.h>
+#include <asm/export.h>
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
 #include CONFIG_DEBUG_LL_INCLUDE
@@ -727,6 +728,8 @@ __pv_phys_pfn_offset:
 __pv_offset:
 	.quad	0
 	.size	__pv_offset, . -__pv_offset
+EXPORT_SYMBOL(__pv_phys_pfn_offset)
+EXPORT_SYMBOL(__pv_offset)
 #endif
 
 #include "head-common.S"

+ 3 - 0
arch/arm/kernel/smccc-call.S

@@ -16,6 +16,7 @@
 #include <asm/opcodes-sec.h>
 #include <asm/opcodes-virt.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	/*
 	 * Wrap c macros in asm macros to delay expansion until after the
@@ -51,6 +52,7 @@ UNWIND(	.fnend)
 ENTRY(arm_smccc_smc)
 	SMCCC SMCCC_SMC
 ENDPROC(arm_smccc_smc)
+EXPORT_SYMBOL(arm_smccc_smc)
 
 /*
  * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -60,3 +62,4 @@ ENDPROC(arm_smccc_smc)
 ENTRY(arm_smccc_hvc)
 	SMCCC SMCCC_HVC
 ENDPROC(arm_smccc_hvc)
+EXPORT_SYMBOL(arm_smccc_hvc)

+ 3 - 0
arch/arm/lib/ashldi3.S

@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA.  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsl)
 
 ENDPROC(__ashldi3)
 ENDPROC(__aeabi_llsl)
+EXPORT_SYMBOL(__ashldi3)
+EXPORT_SYMBOL(__aeabi_llsl)

+ 3 - 0
arch/arm/lib/ashrdi3.S

@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA.  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_lasr)
 
 ENDPROC(__ashrdi3)
 ENDPROC(__aeabi_lasr)
+EXPORT_SYMBOL(__ashrdi3)
+EXPORT_SYMBOL(__aeabi_lasr)

+ 5 - 0
arch/arm/lib/bitops.h

@@ -1,5 +1,6 @@
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, name, instr
@@ -25,6 +26,7 @@ UNWIND(	.fnstart	)
 	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 
 	.macro	testop, name, instr, store
@@ -55,6 +57,7 @@ UNWIND(	.fnstart	)
 2:	bx	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 #else
 	.macro	bitop, name, instr
@@ -74,6 +77,7 @@ UNWIND(	.fnstart	)
 	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 
 /**
@@ -102,5 +106,6 @@ UNWIND(	.fnstart	)
 	ret	lr
 UNWIND(	.fnend		)
 ENDPROC(\name		)
+EXPORT_SYMBOL(\name	)
 	.endm
 #endif

+ 3 - 0
arch/arm/lib/bswapsdi2.S

@@ -1,5 +1,6 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #if __LINUX_ARM_ARCH__ >= 6
 ENTRY(__bswapsi2)
@@ -35,3 +36,5 @@ ENTRY(__bswapdi2)
 	ret lr
 ENDPROC(__bswapdi2)
 #endif
+EXPORT_SYMBOL(__bswapsi2)
+EXPORT_SYMBOL(__bswapdi2)

+ 4 - 0
arch/arm/lib/clear_user.S

@@ -10,6 +10,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 		.text
 
@@ -50,6 +51,9 @@ USER(		strnebt	r2, [r0])
 UNWIND(.fnend)
 ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_clear_user)
+#endif
 
 		.pushsection .text.fixup,"ax"
 		.align	0

+ 2 - 0
arch/arm/lib/copy_from_user.S

@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 /*
  * Prototype:
@@ -94,6 +95,7 @@ ENTRY(arm_copy_from_user)
 #include "copy_template.S"
 
 ENDPROC(arm_copy_from_user)
+EXPORT_SYMBOL(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0

+ 2 - 0
arch/arm/lib/copy_page.S

@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/export.h>
 
 #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
 
@@ -45,3 +46,4 @@ ENTRY(copy_page)
 	PLD(	beq	2b			)
 		ldmfd	sp!, {r4, pc}			@	3
 ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)

+ 4 - 0
arch/arm/lib/copy_to_user.S

@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 /*
  * Prototype:
@@ -99,6 +100,9 @@ WEAK(arm_copy_to_user)
 
 ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_copy_to_user)
+#endif
 
 	.pushsection .text.fixup,"ax"
 	.align 0

+ 2 - 1
arch/arm/lib/csumipv6.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 
@@ -30,4 +31,4 @@ ENTRY(__csum_ipv6_magic)
 		adcs	r0, r0, #0
 		ldmfd	sp!, {pc}
 ENDPROC(__csum_ipv6_magic)
-
+EXPORT_SYMBOL(__csum_ipv6_magic)

+ 2 - 0
arch/arm/lib/csumpartial.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 
@@ -140,3 +141,4 @@ ENTRY(csum_partial)
 		bne	4b
 		b	.Lless4
 ENDPROC(csum_partial)
+EXPORT_SYMBOL(csum_partial)

+ 1 - 0
arch/arm/lib/csumpartialcopy.S

@@ -49,5 +49,6 @@
 
 #define FN_ENTRY	ENTRY(csum_partial_copy_nocheck)
 #define FN_EXIT		ENDPROC(csum_partial_copy_nocheck)
+#define FN_EXPORT	EXPORT_SYMBOL(csum_partial_copy_nocheck)
 
 #include "csumpartialcopygeneric.S"

+ 2 - 0
arch/arm/lib/csumpartialcopygeneric.S

@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 /*
  * unsigned int
@@ -331,3 +332,4 @@ FN_ENTRY
 		mov	r5, r4, get_byte_1
 		b	.Lexit
 FN_EXIT
+FN_EXPORT

+ 1 - 0
arch/arm/lib/csumpartialcopyuser.S

@@ -73,6 +73,7 @@
 
 #define FN_ENTRY	ENTRY(csum_partial_copy_from_user)
 #define FN_EXIT		ENDPROC(csum_partial_copy_from_user)
+#define FN_EXPORT	EXPORT_SYMBOL(csum_partial_copy_from_user)
 
 #include "csumpartialcopygeneric.S"
 

+ 2 - 0
arch/arm/lib/delay.c

@@ -24,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/export.h>
 #include <linux/timex.h>
 
 /*
@@ -34,6 +35,7 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
 	.const_udelay	= __loop_const_udelay,
 	.udelay		= __loop_udelay,
 };
+EXPORT_SYMBOL(arm_delay_ops);
 
 static const struct delay_timer *delay_timer;
 static bool delay_calibrated;

+ 2 - 0
arch/arm/lib/div64.S

@@ -15,6 +15,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -210,3 +211,4 @@ Ldiv0_64:
 
 UNWIND(.fnend)
 ENDPROC(__do_div64)
+EXPORT_SYMBOL(__do_div64)

+ 9 - 0
arch/arm/lib/findbit.S

@@ -15,6 +15,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
                 .text
 
 /*
@@ -37,6 +38,7 @@ ENTRY(_find_first_zero_bit_le)
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_zero_bit_le)
+EXPORT_SYMBOL(_find_first_zero_bit_le)
 
 /*
  * Purpose  : Find next 'zero' bit
@@ -57,6 +59,7 @@ ENTRY(_find_next_zero_bit_le)
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_zero_bit_le)
+EXPORT_SYMBOL(_find_next_zero_bit_le)
 
 /*
  * Purpose  : Find a 'one' bit
@@ -78,6 +81,7 @@ ENTRY(_find_first_bit_le)
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_bit_le)
+EXPORT_SYMBOL(_find_first_bit_le)
 
 /*
  * Purpose  : Find next 'one' bit
@@ -97,6 +101,7 @@ ENTRY(_find_next_bit_le)
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_bit_le)
+EXPORT_SYMBOL(_find_next_bit_le)
 
 #ifdef __ARMEB__
 
@@ -116,6 +121,7 @@ ENTRY(_find_first_zero_bit_be)
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_zero_bit_be)
+EXPORT_SYMBOL(_find_first_zero_bit_be)
 
 ENTRY(_find_next_zero_bit_be)
 		teq	r1, #0
@@ -133,6 +139,7 @@ ENTRY(_find_next_zero_bit_be)
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_zero_bit_be)
+EXPORT_SYMBOL(_find_next_zero_bit_be)
 
 ENTRY(_find_first_bit_be)
 		teq	r1, #0
@@ -150,6 +157,7 @@ ENTRY(_find_first_bit_be)
 3:		mov	r0, r1			@ no free bits
 		ret	lr
 ENDPROC(_find_first_bit_be)
+EXPORT_SYMBOL(_find_first_bit_be)
 
 ENTRY(_find_next_bit_be)
 		teq	r1, #0
@@ -166,6 +174,7 @@ ENTRY(_find_next_bit_be)
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
 ENDPROC(_find_next_bit_be)
+EXPORT_SYMBOL(_find_next_bit_be)
 
 #endif
 

+ 9 - 0
arch/arm/lib/getuser.S

@@ -31,6 +31,7 @@
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
+#include <asm/export.h>
 
 ENTRY(__get_user_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -38,6 +39,7 @@ ENTRY(__get_user_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
 
 ENTRY(__get_user_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +60,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
 
 ENTRY(__get_user_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +68,7 @@ ENTRY(__get_user_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
 
 ENTRY(__get_user_8)
 	check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -78,6 +82,7 @@ ENTRY(__get_user_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
 
 #ifdef __ARMEB__
 ENTRY(__get_user_32t_8)
@@ -91,6 +96,7 @@ ENTRY(__get_user_32t_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_32t_8)
+EXPORT_SYMBOL(__get_user_32t_8)
 
 ENTRY(__get_user_64t_1)
 	check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +104,7 @@ ENTRY(__get_user_64t_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_1)
+EXPORT_SYMBOL(__get_user_64t_1)
 
 ENTRY(__get_user_64t_2)
 	check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +121,7 @@ rb	.req	r0
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_2)
+EXPORT_SYMBOL(__get_user_64t_2)
 
 ENTRY(__get_user_64t_4)
 	check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +129,7 @@ ENTRY(__get_user_64t_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__get_user_64t_4)
+EXPORT_SYMBOL(__get_user_64t_4)
 #endif
 
 __get_user_bad8:

+ 2 - 0
arch/arm/lib/io-readsb.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Linsb_align:	rsb	ip, ip, #4
 		cmp	ip, r2
@@ -121,3 +122,4 @@ ENTRY(__raw_readsb)
 
 		ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(__raw_readsb)
+EXPORT_SYMBOL(__raw_readsb)

+ 2 - 0
arch/arm/lib/io-readsl.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 ENTRY(__raw_readsl)
 		teq	r2, #0		@ do we have to check for the zero len?
@@ -77,3 +78,4 @@ ENTRY(__raw_readsl)
 		strb	r3, [r1, #0]
 		ret	lr
 ENDPROC(__raw_readsl)
+EXPORT_SYMBOL(__raw_readsl)

+ 2 - 1
arch/arm/lib/io-readsw-armv3.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Linsw_bad_alignment:
 		adr	r0, .Linsw_bad_align_msg
@@ -103,4 +104,4 @@ ENTRY(__raw_readsw)
 
 		ldmfd	sp!, {r4, r5, r6, pc}
 
-
+EXPORT_SYMBOL(__raw_readsw)

+ 2 - 0
arch/arm/lib/io-readsw-armv4.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	pack, rd, hw1, hw2
 #ifndef __ARMEB__
@@ -129,3 +130,4 @@ ENTRY(__raw_readsw)
 		strneb	ip, [r1]
 		ldmfd	sp!, {r4, pc}
 ENDPROC(__raw_readsw)
+EXPORT_SYMBOL(__raw_readsw)

+ 2 - 0
arch/arm/lib/io-writesb.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	outword, rd
 #ifndef __ARMEB__
@@ -92,3 +93,4 @@ ENTRY(__raw_writesb)
 
 		ldmfd	sp!, {r4, r5, pc}
 ENDPROC(__raw_writesb)
+EXPORT_SYMBOL(__raw_writesb)

+ 2 - 0
arch/arm/lib/io-writesl.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 ENTRY(__raw_writesl)
 		teq	r2, #0		@ do we have to check for the zero len?
@@ -65,3 +66,4 @@ ENTRY(__raw_writesl)
 		bne	6b
 		ret	lr
 ENDPROC(__raw_writesl)
+EXPORT_SYMBOL(__raw_writesl)

+ 2 - 0
arch/arm/lib/io-writesw-armv3.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 .Loutsw_bad_alignment:
 		adr	r0, .Loutsw_bad_align_msg
@@ -124,3 +125,4 @@ ENTRY(__raw_writesw)
 		strne	ip, [r0]
 
 		ldmfd	sp!, {r4, r5, r6, pc}
+EXPORT_SYMBOL(__raw_writesw)

+ 2 - 0
arch/arm/lib/io-writesw-armv4.S

@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.macro	outword, rd
 #ifndef __ARMEB__
@@ -98,3 +99,4 @@ ENTRY(__raw_writesw)
 		strneh	ip, [r0]
 		ret	lr
 ENDPROC(__raw_writesw)
+EXPORT_SYMBOL(__raw_writesw)

+ 9 - 0
arch/arm/lib/lib1funcs.S

@@ -36,6 +36,7 @@ Boston, MA 02111-1307, USA.  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 .macro ARM_DIV_BODY dividend, divisor, result, curbit
 
@@ -238,6 +239,8 @@ UNWIND(.fnstart)
 UNWIND(.fnend)
 ENDPROC(__udivsi3)
 ENDPROC(__aeabi_uidiv)
+EXPORT_SYMBOL(__udivsi3)
+EXPORT_SYMBOL(__aeabi_uidiv)
 
 ENTRY(__umodsi3)
 UNWIND(.fnstart)
@@ -256,6 +259,7 @@ UNWIND(.fnstart)
 
 UNWIND(.fnend)
 ENDPROC(__umodsi3)
+EXPORT_SYMBOL(__umodsi3)
 
 #ifdef CONFIG_ARM_PATCH_IDIV
 	.align 3
@@ -303,6 +307,8 @@ UNWIND(.fnstart)
 UNWIND(.fnend)
 ENDPROC(__divsi3)
 ENDPROC(__aeabi_idiv)
+EXPORT_SYMBOL(__divsi3)
+EXPORT_SYMBOL(__aeabi_idiv)
 
 ENTRY(__modsi3)
 UNWIND(.fnstart)
@@ -327,6 +333,7 @@ UNWIND(.fnstart)
 
 UNWIND(.fnend)
 ENDPROC(__modsi3)
+EXPORT_SYMBOL(__modsi3)
 
 #ifdef CONFIG_AEABI
 
@@ -343,6 +350,7 @@ UNWIND(.save {r0, r1, ip, lr}	)
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_uidivmod)
+EXPORT_SYMBOL(__aeabi_uidivmod)
 
 ENTRY(__aeabi_idivmod)
 UNWIND(.fnstart)
@@ -356,6 +364,7 @@ UNWIND(.save {r0, r1, ip, lr}	)
 
 UNWIND(.fnend)
 ENDPROC(__aeabi_idivmod)
+EXPORT_SYMBOL(__aeabi_idivmod)
 
 #endif
 

+ 3 - 0
arch/arm/lib/lshrdi3.S

@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA.  */
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsr)
 
 ENDPROC(__lshrdi3)
 ENDPROC(__aeabi_llsr)
+EXPORT_SYMBOL(__lshrdi3)
+EXPORT_SYMBOL(__aeabi_llsr)

+ 2 - 0
arch/arm/lib/memchr.S

@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -24,3 +25,4 @@ ENTRY(memchr)
 2:	movne	r0, #0
 	ret	lr
 ENDPROC(memchr)
+EXPORT_SYMBOL(memchr)

+ 3 - 0
arch/arm/lib/memcpy.S

@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 #define LDR1W_SHIFT	0
 #define STR1W_SHIFT	0
@@ -68,3 +69,5 @@ ENTRY(memcpy)
 
 ENDPROC(memcpy)
 ENDPROC(mmiocpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(mmiocpy)

+ 2 - 0
arch/arm/lib/memmove.S

@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 		.text
 
@@ -225,3 +226,4 @@ ENTRY(memmove)
 18:		backward_copy_shift	push=24	pull=8
 
 ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)

+ 3 - 0
arch/arm/lib/memset.S

@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -135,3 +136,5 @@ UNWIND( .fnstart            )
 UNWIND( .fnend   )
 ENDPROC(memset)
 ENDPROC(mmioset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(mmioset)

+ 2 - 0
arch/arm/lib/memzero.S

@@ -10,6 +10,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/unwind.h>
+#include <asm/export.h>
 
 	.text
 	.align	5
@@ -135,3 +136,4 @@ UNWIND(	.fnstart			)
 	ret	lr			@ 1
 UNWIND(	.fnend				)
 ENDPROC(__memzero)
+EXPORT_SYMBOL(__memzero)

+ 3 - 0
arch/arm/lib/muldi3.S

@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -46,3 +47,5 @@ ENTRY(__aeabi_lmul)
 
 ENDPROC(__muldi3)
 ENDPROC(__aeabi_lmul)
+EXPORT_SYMBOL(__muldi3)
+EXPORT_SYMBOL(__aeabi_lmul)

+ 5 - 0
arch/arm/lib/putuser.S

@@ -31,6 +31,7 @@
 #include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
+#include <asm/export.h>
 
 ENTRY(__put_user_1)
 	check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -38,6 +39,7 @@ ENTRY(__put_user_1)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
 
 ENTRY(__put_user_2)
 	check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -62,6 +64,7 @@ ENTRY(__put_user_2)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
 
 ENTRY(__put_user_4)
 	check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -69,6 +72,7 @@ ENTRY(__put_user_4)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
 
 ENTRY(__put_user_8)
 	check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
 	mov	r0, #0
 	ret	lr
 ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
 
 __put_user_bad:
 	mov	r0, #-EFAULT

+ 2 - 0
arch/arm/lib/strchr.S

@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 		.align	5
@@ -25,3 +26,4 @@ ENTRY(strchr)
 		subeq	r0, r0, #1
 		ret	lr
 ENDPROC(strchr)
+EXPORT_SYMBOL(strchr)

+ 2 - 0
arch/arm/lib/strrchr.S

@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 		.text
 		.align	5
@@ -24,3 +25,4 @@ ENTRY(strrchr)
 		mov	r0, r3
 		ret	lr
 ENDPROC(strrchr)
+EXPORT_SYMBOL(strrchr)

+ 3 - 0
arch/arm/lib/uaccess_with_memcpy.c

@@ -19,6 +19,7 @@
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/hugetlb.h>
+#include <linux/export.h>
 #include <asm/current.h>
 #include <asm/page.h>
 
@@ -156,6 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 	}
 	return n;
 }
+EXPORT_SYMBOL(arm_copy_to_user);
 	
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
@@ -213,6 +215,7 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
 	}
 	return n;
 }
+EXPORT_SYMBOL(arm_clear_user);
 
 #if 0
 

+ 3 - 0
arch/arm/lib/ucmpdi2.S

@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 #ifdef __ARMEB__
 #define xh r0
@@ -35,6 +36,7 @@ ENTRY(__ucmpdi2)
 	ret	lr
 
 ENDPROC(__ucmpdi2)
+EXPORT_SYMBOL(__ucmpdi2)
 
 #ifdef CONFIG_AEABI
 
@@ -48,6 +50,7 @@ ENTRY(__aeabi_ulcmp)
 	ret	lr
 
 ENDPROC(__aeabi_ulcmp)
+EXPORT_SYMBOL(__aeabi_ulcmp)
 
 #endif
 

+ 0 - 1
arch/arm/mach-imx/Makefile

@@ -32,7 +32,6 @@ endif
 
 ifdef CONFIG_SND_IMX_SOC
 obj-y += ssi-fiq.o
-obj-y += ssi-fiq-ksym.o
 endif
 
 # i.MX21 based machines

+ 0 - 20
arch/arm/mach-imx/ssi-fiq-ksym.c

@@ -1,20 +0,0 @@
-/*
- * Exported ksyms for the SSI FIQ handler
- *
- * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-
-#include <linux/platform_data/asoc-imx-ssi.h>
-
-EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_start);
-EXPORT_SYMBOL(imx_ssi_fiq_end);
-EXPORT_SYMBOL(imx_ssi_fiq_base);
-

+ 6 - 1
arch/arm/mach-imx/ssi-fiq.S

@@ -8,6 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/export.h>
 
 /*
  * r8  = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -144,4 +145,8 @@ imx_ssi_fiq_tx_buffer:
 		.word 0x0
 .L_imx_ssi_fiq_end:
 imx_ssi_fiq_end:
-
+EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_start)
+EXPORT_SYMBOL(imx_ssi_fiq_end)
+EXPORT_SYMBOL(imx_ssi_fiq_base)

+ 1 - 1
arch/ia64/hp/sim/boot/Makefile

@@ -33,5 +33,5 @@ $(obj)/vmlinux.bin: vmlinux FORCE
 LDFLAGS_bootloader = -static -T
 
 $(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
-                   lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE
+                   lib/lib.a arch/ia64/lib/lib.a FORCE
 	$(call if_changed,ld)

+ 3 - 0
arch/ia64/include/asm/export.h

@@ -0,0 +1,3 @@
+/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
+#define KSYM_FUNC(name) @fptr(name)
+#include <asm-generic/export.h>

+ 3 - 0
arch/ia64/kernel/entry.S

@@ -48,6 +48,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/export.h>
 
 #include "minstate.h"
 
@@ -1345,12 +1346,14 @@ GLOBAL_ENTRY(unw_init_running)
 	mov rp=loc0
 	br.ret.sptk.many rp
 END(unw_init_running)
+EXPORT_SYMBOL(unw_init_running)
 
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 GLOBAL_ENTRY(_mcount)
 	br ftrace_stub
 END(_mcount)
+EXPORT_SYMBOL(_mcount)
 
 .here:
 	br.ret.sptk.many b0

+ 2 - 0
arch/ia64/kernel/esi_stub.S

@@ -35,6 +35,7 @@
 
 #include <asm/processor.h>
 #include <asm/asmmacro.h>
+#include <asm/export.h>
 
 /*
  * Inputs:
@@ -94,3 +95,4 @@ GLOBAL_ENTRY(esi_call_phys)
 	mov gp=loc2
 	br.ret.sptk.many rp
 END(esi_call_phys)
+EXPORT_SYMBOL_GPL(esi_call_phys)

+ 2 - 0
arch/ia64/kernel/head.S

@@ -32,6 +32,7 @@
 #include <asm/mca_asm.h>
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 #define SAL_PSR_BITS_TO_SET				\
@@ -168,6 +169,7 @@ RestRR:											\
 	__PAGE_ALIGNED_DATA
 
 	.global empty_zero_page
+EXPORT_DATA_SYMBOL_GPL(empty_zero_page)
 empty_zero_page:
 	.skip PAGE_SIZE
 

+ 2 - 92
arch/ia64/kernel/ia64_ksyms.c

@@ -1,101 +1,11 @@
 /*
  * Architecture-specific kernel symbols
- *
- * Don't put any exports here unless it's defined in an assembler file.
- * All other exports should be put directly after the definition.
  */
 
-#include <linux/module.h>
-
-#include <linux/string.h>
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(strlen);
-
-#include <asm/pgtable.h>
-EXPORT_SYMBOL_GPL(empty_zero_page);
-
-#include <asm/checksum.h>
-EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#include <asm/page.h>
-EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(copy_page);
-
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <linux/compiler.h>
+#include <linux/export.h>
 #include <linux/bootmem.h>
 EXPORT_SYMBOL(min_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 EXPORT_SYMBOL(max_low_pfn);	/* defined by bootmem.c, but not exported by generic code */
 #endif
-
-#include <asm/processor.h>
-EXPORT_SYMBOL(ia64_cpu_info);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_per_cpu_offset);
-#endif
-
-#include <asm/uaccess.h>
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* from arch/ia64/lib */
-extern void __divsi3(void);
-extern void __udivsi3(void);
-extern void __modsi3(void);
-extern void __umodsi3(void);
-extern void __divdi3(void);
-extern void __udivdi3(void);
-extern void __moddi3(void);
-extern void __umoddi3(void);
-
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__umoddi3);
-
-#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
-extern void xor_ia64_2(void);
-extern void xor_ia64_3(void);
-extern void xor_ia64_4(void);
-extern void xor_ia64_5(void);
-
-EXPORT_SYMBOL(xor_ia64_2);
-EXPORT_SYMBOL(xor_ia64_3);
-EXPORT_SYMBOL(xor_ia64_4);
-EXPORT_SYMBOL(xor_ia64_5);
-#endif
-
-#include <asm/pal.h>
-EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
-EXPORT_SYMBOL(ia64_pal_call_phys_static);
-EXPORT_SYMBOL(ia64_pal_call_stacked);
-EXPORT_SYMBOL(ia64_pal_call_static);
-EXPORT_SYMBOL(ia64_load_scratch_fpregs);
-EXPORT_SYMBOL(ia64_save_scratch_fpregs);
-
-#include <asm/unwind.h>
-EXPORT_SYMBOL(unw_init_running);
-
-#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
-extern void esi_call_phys (void);
-EXPORT_SYMBOL_GPL(esi_call_phys);
-#endif
-extern char ia64_ivt[];
-EXPORT_SYMBOL(ia64_ivt);
-
-#include <asm/ftrace.h>
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#include <asm/cacheflush.h>
-EXPORT_SYMBOL_GPL(flush_icache_range);

+ 2 - 0
arch/ia64/kernel/ivt.S

@@ -57,6 +57,7 @@
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 #include <asm/errno.h>
+#include <asm/export.h>
 
 #if 0
 # define PSR_DEFAULT_BITS	psr.ac
@@ -85,6 +86,7 @@
 
 	.align 32768	// align on 32KB boundary
 	.global ia64_ivt
+	EXPORT_DATA_SYMBOL(ia64_ivt)
 ia64_ivt:
 /////////////////////////////////////////////////////////////////////////////////////////
 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)

+ 7 - 0
arch/ia64/kernel/pal.S

@@ -14,6 +14,7 @@
 
 #include <asm/asmmacro.h>
 #include <asm/processor.h>
+#include <asm/export.h>
 
 	.data
 pal_entry_point:
@@ -87,6 +88,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_static)
+EXPORT_SYMBOL(ia64_pal_call_static)
 
 /*
  * Make a PAL call using the stacked registers calling convention.
@@ -122,6 +124,7 @@ GLOBAL_ENTRY(ia64_pal_call_stacked)
 	srlz.d				// serialize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_stacked)
+EXPORT_SYMBOL(ia64_pal_call_stacked)
 
 /*
  * Make a physical mode PAL call using the static registers calling convention.
@@ -193,6 +196,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_phys_static)
+EXPORT_SYMBOL(ia64_pal_call_phys_static)
 
 /*
  * Make a PAL call using the stacked registers in physical mode.
@@ -250,6 +254,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
 	srlz.d				// seralize restoration of psr.l
 	br.ret.sptk.many b0
 END(ia64_pal_call_phys_stacked)
+EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
 
 /*
  * Save scratch fp scratch regs which aren't saved in pt_regs already
@@ -275,6 +280,7 @@ GLOBAL_ENTRY(ia64_save_scratch_fpregs)
 	stf.spill [r2]  = f15,32
 	br.ret.sptk.many rp
 END(ia64_save_scratch_fpregs)
+EXPORT_SYMBOL(ia64_save_scratch_fpregs)
 
 /*
  * Load scratch fp scratch regs (fp10-fp15)
@@ -296,3 +302,4 @@ GLOBAL_ENTRY(ia64_load_scratch_fpregs)
 	ldf.fill  f15 = [r2],32
 	br.ret.sptk.many rp
 END(ia64_load_scratch_fpregs)
+EXPORT_SYMBOL(ia64_load_scratch_fpregs)

+ 4 - 0
arch/ia64/kernel/setup.c

@@ -71,7 +71,11 @@ EXPORT_SYMBOL(__per_cpu_offset);
 #endif
 
 DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
 DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(local_per_cpu_offset);
+#endif
 unsigned long ia64_cycles_per_usec;
 struct ia64_boot_param *ia64_boot_param;
 struct screen_info screen_info;

Some files were not shown because too many files changed in this diff.