/* SPDX-License-Identifier: GPL-2.0 */
/* ld script to make ARM Linux kernel
 * taken from the i386 version by Russell King
 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 */

/* No __ro_after_init data in the .rodata section - which will always be ro */
#define RO_AFTER_INIT_DATA

#include <linux/sizes.h>

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>

#define PROC_INFO \
        . = ALIGN(4); \
        VMLINUX_SYMBOL(__proc_info_begin) = .; \
        *(.proc.info.init) \
        VMLINUX_SYMBOL(__proc_info_end) = .;

#define IDMAP_TEXT \
        ALIGN_FUNCTION(); \
        VMLINUX_SYMBOL(__idmap_text_start) = .; \
        *(.idmap.text) \
        VMLINUX_SYMBOL(__idmap_text_end) = .; \
        . = ALIGN(PAGE_SIZE); \
        VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
        *(.hyp.idmap.text) \
        VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
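
/*
 * Note on IDMAP_TEXT (explanatory, not part of the original file):
 * .idmap.text collects the small amount of code that must keep running
 * while the MMU is being enabled or disabled, so it is kept contiguous
 * and identity-mapped by the early boot code.  The HYP part is padded to
 * a page boundary, which the HYP init size/alignment ASSERT at the end
 * of this script relies on.
 */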

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x)		x
#else
#define ARM_CPU_DISCARD(x)	x
#define ARM_CPU_KEEP(x)
#endif

#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
	defined(CONFIG_GENERIC_BUG)
#define ARM_EXIT_KEEP(x)	x
#define ARM_EXIT_DISCARD(x)
#else
#define ARM_EXIT_KEEP(x)
#define ARM_EXIT_DISCARD(x)	x
#endif
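
/*
 * Rationale (a reading of the #if above, not stated in the original file):
 * with SMP_ON_UP fixups or the generic BUG table enabled, tables built at
 * link time may reference code and data in the exit sections, so
 * EXIT_TEXT/EXIT_DATA cannot simply be discarded by the linker; they are
 * kept in the image and freed at runtime instead.
 */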

OUTPUT_ARCH(arm)
ENTRY(stext)

#ifndef __ARMEB__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
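
/*
 * jiffies aliases the least significant 32 bits of jiffies_64: that word
 * sits at offset 0 on little-endian and at offset 4 on big-endian
 * (__ARMEB__) builds.
 */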

SECTIONS
{
        /*
         * XXX: The linker does not define how output sections are
         * assigned to input sections when there are multiple statements
         * matching the same input section name. There is no documented
         * order of matching.
         *
         * unwind exit sections must be discarded before the rest of the
         * unwind sections get included.
         */
        /DISCARD/ : {
                *(.ARM.exidx.exit.text)
                *(.ARM.extab.exit.text)
                ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
                ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
                ARM_EXIT_DISCARD(EXIT_TEXT)
                ARM_EXIT_DISCARD(EXIT_DATA)
                EXIT_CALL
#ifndef CONFIG_MMU
                *(.text.fixup)
                *(__ex_table)
#endif
                *(.alt.smp.init)
                *(.discard)
                *(.discard.*)
        }

        . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
        _xiprom = .;			/* XIP ROM area to be mapped */
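
        /*
         * Layout note (explanatory, not part of the original file):
         * everything from _xiprom up to _exiprom below - text, rodata and
         * the init sections - stays in the XIP ROM and executes in place
         * at XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); only the writable data
         * that follows _exiprom is copied into RAM at boot.
         */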

        .head.text : {
                _text = .;
                HEAD_TEXT
        }

        .text : {			/* Real text segment */
                _stext = .;		/* Text and read-only data */
                IDMAP_TEXT
                __exception_text_start = .;
                *(.exception.text)
                __exception_text_end = .;
                IRQENTRY_TEXT
                TEXT_TEXT
                SCHED_TEXT
                CPUIDLE_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                *(.gnu.warning)
                *(.glue_7)
                *(.glue_7t)
                . = ALIGN(4);
                *(.got)			/* Global offset table */
                ARM_CPU_KEEP(PROC_INFO)
        }

        RO_DATA(PAGE_SIZE)

        . = ALIGN(4);
        __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
                __start___ex_table = .;
#ifdef CONFIG_MMU
                *(__ex_table)
#endif
                __stop___ex_table = .;
        }

#ifdef CONFIG_ARM_UNWIND
        /*
         * Stack unwinding tables
         */
        . = ALIGN(8);
        .ARM.unwind_idx : {
                __start_unwind_idx = .;
                *(.ARM.exidx*)
                __stop_unwind_idx = .;
        }
        .ARM.unwind_tab : {
                __start_unwind_tab = .;
                *(.ARM.extab*)
                __stop_unwind_tab = .;
        }
#endif

        NOTES

        _etext = .;			/* End of text and rodata section */

        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
         */
        __vectors_start = .;
        .vectors 0xffff0000 : AT(__vectors_start) {
                *(.vectors)
        }
        . = __vectors_start + SIZEOF(.vectors);
        __vectors_end = .;

        __stubs_start = .;
        .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
                *(.stubs)
        }
        . = __stubs_start + SIZEOF(.stubs);
        __stubs_end = .;

        PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
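
        /*
         * VMA/LMA note (explanatory, not part of the original file):
         * .vectors and .stubs are given their runtime addresses as VMA
         * (0xffff0000 and 0x1000 above it), while AT() keeps their load
         * addresses inside the ROM image at __vectors_start/__stubs_start.
         * The dot assignments above then move the location counter back
         * past the ROM copies so the following sections are laid out
         * normally.
         */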

        INIT_TEXT_SECTION(8)

        .exit.text : {
                ARM_EXIT_KEEP(EXIT_TEXT)
        }

        .init.proc.info : {
                ARM_CPU_DISCARD(PROC_INFO)
        }
        .init.arch.info : {
                __arch_info_begin = .;
                *(.arch.info.init)
                __arch_info_end = .;
        }
        .init.tagtable : {
                __tagtable_begin = .;
                *(.taglist.init)
                __tagtable_end = .;
        }

        .init.rodata : {
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }

#ifdef CONFIG_ARM_MPU
        . = ALIGN(SZ_128K);
#endif
        _exiprom = .;			/* End of XIP ROM area */

/*
 * From this point, stuff is considered writable and will be copied to RAM
 */
        __data_loc = ALIGN(4);		/* location in file */
        . = PAGE_OFFSET + TEXT_OFFSET;	/* location in memory */
#undef LOAD_OFFSET
#define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc)
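
/*
 * From here on, AT(ADDR(section) - LOAD_OFFSET) pins each section's load
 * address (LMA) back into the XIP ROM image while its runtime address (VMA)
 * is in RAM.  Purely illustrative numbers, not from any real configuration:
 * if __data_loc ends up at 0x10100000 and PAGE_OFFSET + TEXT_OFFSET is
 * 0xc0008000, then LOAD_OFFSET = 0xc0008000 - 0x10100000, and a section
 * placed at VMA 0xc0010000 gets LMA 0xc0010000 - LOAD_OFFSET = 0x10108000,
 * i.e. the matching offset within the ROM image.
 */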

        . = ALIGN(THREAD_SIZE);
        _sdata = .;
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
        .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
                *(.data..ro_after_init)
        }
        _edata = .;

        . = ALIGN(PAGE_SIZE);
        __init_begin = .;
        .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
                INIT_DATA
        }
        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                ARM_EXIT_KEEP(EXIT_DATA)
        }

#ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
#endif

        /*
         * End of copied data. We need a dummy section to get its LMA.
         * Also located before final ALIGN() as trailing padding is not stored
         * in the resulting binary file and useless to copy.
         */
        .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
        _edata_loc = LOADADDR(.data.endmark);
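
        /*
         * Explanatory note, not part of the original file: _edata_loc is
         * the LMA just past the last byte of data stored in the ROM image.
         * The XIP startup code copies the writable data from its ROM
         * location (starting at __data_loc) to RAM (starting at _sdata),
         * and _edata_loc marks the end of what has to be copied.
         */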

        . = ALIGN(PAGE_SIZE);
        __init_end = .;

#ifdef CONFIG_HAVE_TCM
        /*
         * We align everything to a page boundary so we can
         * free it after init has commenced and TCM contents have
         * been copied to its destination.
         */
        .tcm_start : {
                . = ALIGN(PAGE_SIZE);
                __tcm_start = .;
                __itcm_start = .;
        }

        /*
         * Link these to the ITCM RAM.
         * Put the VMA at the TCM address and the LMA in common RAM;
         * we'll upload the contents from RAM to TCM and free
         * the used RAM after that.
         */
        .text_itcm ITCM_OFFSET : AT(__itcm_start)
        {
                __sitcm_text = .;
                *(.tcm.text)
                *(.tcm.rodata)
                . = ALIGN(4);
                __eitcm_text = .;
        }

        /*
         * Reset the dot pointer; this is needed to create the
         * relative __dtcm_start below (to be used as extern in code).
         */
        . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);

        .dtcm_start : {
                __dtcm_start = .;
        }

        /* TODO: add remainder of ITCM as well, that can be used for data! */
        .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
        {
                . = ALIGN(4);
                __sdtcm_data = .;
                *(.tcm.data)
                . = ALIGN(4);
                __edtcm_data = .;
        }

        /* Reset the dot pointer or the linker gets confused */
        . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);

        /* End marker for freeing TCM copy in linked object */
        .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)) {
                . = ALIGN(PAGE_SIZE);
                __tcm_end = .;
        }
#endif

        BSS_SECTION(0, 0, 8)
        _end = .;

        STABS_DEBUG
}

/*
 * These must never be empty
 * If you have to comment these two assert statements out, your
 * binutils is too old (for other reasons as well)
 */
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
	"HYP init code too big or misaligned")

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
 * The .bss is used as a stack area for __inflate_kernel_data() whose stack
 * frame is 9568 bytes. Make sure it has extra room left.
 */
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif

#ifdef CONFIG_ARM_MPU
/*
 * Due to the PMSAv7 restrictions on base address and size we have to
 * enforce minimal alignment. A weaker alignment of _xiprom would likely
 * force the XIP address space to span multiple MPU regions; we could then
 * end up reprogramming the very MPU region we are executing from with a
 * setting that no longer covers the reprogramming code itself, and as soon
 * as the new MPU settings took effect we would be executing straight from
 * the background region, which is XN.
 * 1M alignment should suit most users. _exiprom only needs to be aligned
 * to 1/8 of 1M because a PMSAv7 region is divided into eight subregions
 * that can be disabled individually, so the tail can be covered by
 * subregion disable.
 */
ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
#endif