vmlinux.lds.S

/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#ifdef CONFIG_ARM_KERNMEM_PERMS
#include <asm/pgtable.h>
#endif
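
/*
 * The .proc.info.init records emitted by the per-CPU proc-*.S files are
 * collected between __proc_info_begin and __proc_info_end so that the
 * boot code can pick the entry matching the running CPU.
 */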
#define PROC_INFO \
        . = ALIGN(4); \
        VMLINUX_SYMBOL(__proc_info_begin) = .; \
        *(.proc.info.init) \
        VMLINUX_SYMBOL(__proc_info_end) = .;
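
/*
 * Code that must run from an identity mapping while the MMU is being
 * enabled or disabled is gathered here.  The HYP idmap text starts on
 * its own page and, per the ASSERT at the end of this file, must not
 * cross a page boundary.
 */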
#define IDMAP_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__idmap_text_start) = .; \
        *(.idmap.text) \
        VMLINUX_SYMBOL(__idmap_text_end) = .; \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
        *(.hyp.idmap.text) \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
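
/*
 * CPU setup/teardown code is only needed after boot when CPUs can be
 * hotplugged; without CONFIG_HOTPLUG_CPU it can be discarded at link time.
 */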
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)         x
#else
#define ARM_CPU_DISCARD(x)      x
#define ARM_CPU_KEEP(x)
#endif
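
/*
 * __exit text and data are normally discarded, but must be kept when
 * SMP_ON_UP fixups or GENERIC_BUG records may still reference them.
 */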
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
        defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)        x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)     x
#endif

OUTPUT_ARCH(arm)
ENTRY(stext)
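
/*
 * jiffies aliases the least significant 32 bits of jiffies_64, which sit
 * at a different byte offset on big-endian kernels.
 */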
#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name.  There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                *(.ARM.exidx.exit.text)
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
#ifndef CONFIG_MMU
                *(.text.fixup)
                *(__ex_table)
#endif
#ifndef CONFIG_SMP_ON_UP
                *(.alt.smp.init)
#endif
                *(.discard)
                *(.discard.*)
        }
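
        /*
         * An XIP kernel is linked to execute directly from its flash
         * address; otherwise the image is linked at its RAM address.
         */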
#ifdef CONFIG_XIP_KERNEL
        . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
#else
        . = PAGE_OFFSET + TEXT_OFFSET;
#endif

        .head.text : {
                _text = .;
                HEAD_TEXT
        }

#ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
#endif

        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                IDMAP_TEXT
                __exception_text_start = .;
                *(.exception.text)
                __exception_text_end = .;
                IRQENTRY_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.warning)
                *(.glue_7)
                *(.glue_7t)
                . = ALIGN(4);
                *(.got)                 /* Global offset table          */
                ARM_CPU_KEEP(PROC_INFO)
        }

#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
#endif
        RO_DATA(PAGE_SIZE)

        . = ALIGN(4);
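
        /*
         * The exception table records fixup addresses for kernel
         * instructions that may fault while accessing user memory; on
         * !CONFIG_MMU kernels it was discarded above.
         */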
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
#ifdef CONFIG_MMU
                *(__ex_table)
#endif
                __stop___ex_table = .;
        }

#ifdef CONFIG_ARM_UNWIND
        /*
         * Stack unwinding tables
         */
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
                *(.ARM.exidx*)
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
                *(.ARM.extab*)
                __stop_unwind_tab = .;
        }
#endif

        NOTES

        _etext = .;                     /* End of text and rodata section */

#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
# else
        . = ALIGN(PAGE_SIZE);
# endif
        __init_begin = .;
#endif

        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
         */
        __vectors_start = .;
        .vectors 0 : AT(__vectors_start) {
                *(.vectors)
        }
        . = __vectors_start + SIZEOF(.vectors);
        __vectors_end = .;

        __stubs_start = .;
        .stubs 0x1000 : AT(__stubs_start) {
                *(.stubs)
        }
        . = __stubs_start + SIZEOF(.stubs);
        __stubs_end = .;
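
        /*
         * Both blocks above are copied to the vectors page early during
         * boot; their 0 and 0x1000 link addresses only fix their layout
         * relative to the vector base.
         */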

        INIT_TEXT_SECTION(8)
        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }
        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }
#ifdef CONFIG_SMP_ON_UP
        .init.smpalt : {
                __smpalt_begin = .;
                *(.alt.smp.init)
                __smpalt_end = .;
        }
#endif
        .init.pv_table : {
                __pv_table_begin = .;
                *(.pv_table)
                __pv_table_end = .;
        }
        .init.data : {
#ifndef CONFIG_XIP_KERNEL
                INIT_DATA
#endif
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
#ifndef CONFIG_XIP_KERNEL
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }
#endif

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

#ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
        . = PAGE_OFFSET + TEXT_OFFSET;
#else
#ifdef CONFIG_ARM_KERNMEM_PERMS
        . = ALIGN(1<<SECTION_SHIFT);
#else
        . = ALIGN(THREAD_SIZE);
#endif
        __init_end = .;
        __data_loc = .;
#endif
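
        /*
         * For XIP kernels, .data is stored in flash at __data_loc and
         * copied to its RAM address at PAGE_OFFSET + TEXT_OFFSET during
         * early boot; for RAM kernels the load and run addresses coincide.
         */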
        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
                _sdata = .;

                /*
                 * first, the init task union, aligned
                 * to an 8192 byte boundary.
                 */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_XIP_KERNEL
                . = ALIGN(PAGE_SIZE);
                __init_begin = .;
                INIT_DATA
                ARM_EXIT_KEEP(EXIT_DATA)
                . = ALIGN(PAGE_SIZE);
                __init_end = .;
#endif

                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)

                /*
                 * and the usual data section
                 */
                DATA_DATA
                CONSTRUCTORS

                _edata = .;
        }
        _edata_loc = __data_loc + SIZEOF(.data);

#ifdef CONFIG_HAVE_TCM
        /*
         * We align everything to a page boundary so we can
         * free it after init has commenced and TCM contents have
         * been copied to its destination.
         */
        .tcm_start : {
                . = ALIGN(PAGE_SIZE);
                __tcm_start = .;
                __itcm_start = .;
        }

        /*
         * Link these to the ITCM RAM
         * Put VMA to the TCM address and LMA to the common RAM
         * and we'll upload the contents from RAM to TCM and free
         * the used RAM after that.
         */
        .text_itcm ITCM_OFFSET : AT(__itcm_start)
        {
                __sitcm_text = .;
                *(.tcm.text)
                *(.tcm.rodata)
                . = ALIGN(4);
                __eitcm_text = .;
        }

        /*
         * Reset the dot pointer, this is needed to create the
         * relative __dtcm_start below (to be used as extern in code).
         */
        . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

        .dtcm_start : {
                __dtcm_start = .;
        }

        /* TODO: add remainder of ITCM as well, that can be used for data! */
        .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
        {
                . = ALIGN(4);
                __sdtcm_data = .;
                *(.tcm.data)
                . = ALIGN(4);
                __edtcm_data = .;
        }

        /* Reset the dot pointer or the linker gets confused */
        . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

        /* End marker for freeing TCM copy in linked object */
        .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
                . = ALIGN(PAGE_SIZE);
                __tcm_end = .;
        }
#endif

        BSS_SECTION(0, 0, 0)
        _end = .;

        STABS_DEBUG
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
        "HYP init code too big or misaligned")