vmlinux.lds.S (4.8 KB)
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

/*
 * Kernel image linker script for Blackfin (elf32-bfin).
 *
 * Two build flavors are handled via the preprocessor:
 *   CONFIG_RAMKERNEL - kernel linked and run from RAM at CONFIG_BOOT_LOAD.
 *   CONFIG_ROMKERNEL - kernel linked to run from ROM (CONFIG_ROM_BASE);
 *                      writable sections get RAM VMAs with ROM load
 *                      addresses (AT(...)) so startup code can copy them.
 *
 * On-chip L1/L2 SRAM sections are placed at their SRAM VMAs but loaded
 * back-to-back in the image (LMA chained via LOADADDR + SIZEOF); the
 * __*_lma/__*_len symbols presumably drive the runtime copy loops in the
 * boot code -- confirm against the startup sources.
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
/* 32-bit jiffies overlays jiffies_64; on this little-endian target the
 * low word sits at the same address, so a plain alias suffices. */
_jiffies = _jiffies_64;

SECTIONS
{
#ifdef CONFIG_RAMKERNEL
	. = CONFIG_BOOT_LOAD;
#else
	. = CONFIG_ROM_BASE;
#endif

	/* Neither the text, ro_data or bss section need to be aligned
	 * So pack them back to back
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
		/* Scheduler text stays here only when it is not being
		 * placed into L1 instruction SRAM (see .text_l1 below). */
		SCHED_TEXT
#endif
		CPUIDLE_TEXT
		LOCK_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
		/* ROM kernels keep init/exit text resident in ROM; nothing
		 * is freed after boot, so it can live inside .text. */
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
		EXIT_TEXT
#endif
		*(.text.*)
		*(.fixup)

#if !L1_CODE_LENGTH
		/* No L1 instruction SRAM configured: fold .l1.text into
		 * the normal text section instead of .text_l1. */
		*(.l1.text)
#endif
		__etext = .;
	}

	EXCEPTION_TABLE(4)
	NOTES

	/* Just in case the first read only is a 32-bit access */
	RO_DATA(4)
	__rodata_end = .;

#ifdef CONFIG_ROMKERNEL
	/* ROM kernel: RAM-resident sections start at CONFIG_BOOT_LOAD;
	 * .bss occupies no image space but anchors the LMA chain at the
	 * end of the ROM read-only data. */
	. = CONFIG_BOOT_LOAD;
	.bss : AT(__rodata_end)
#else
	.bss :
#endif
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

#if defined(CONFIG_ROMKERNEL)
	/* Load .data in ROM immediately after the (zero-size) .bss LMA. */
	.data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
	.data :
#endif
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
		/* No L1 data-A SRAM: keep its input sections in plain .data. */
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		/* No L2 SRAM: keep its data input sections in plain .data. */
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		INIT_TASK_DATA(THREAD_SIZE)

		__edata = .;
	}
	/* Exported so later sections (and boot code) can chain off .data. */
	__data_lma = LOADADDR(.data);
	__data_len = SIZEOF(.data);

	BUG_TABLE

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

#ifdef CONFIG_RAMKERNEL
	INIT_TEXT_SECTION(PAGE_SIZE)

	/* We have to discard exit text and such at runtime, not link time, to
	 * handle embedded cross-section references (alt instructions, bug
	 * table, eh_frame, etc...). We need all of our .text up front and
	 * .data after it for PCREL call issues.
	 */
	.exit.text :
	{
		EXIT_TEXT
	}

	. = ALIGN(16);
	INIT_DATA_SECTION(16)
	PERCPU_SECTION(32)

	.exit.data :
	{
		EXIT_DATA
	}

	/* L1 code section: VMA in L1 instruction SRAM, LMA right after the
	 * last init section so the image stays contiguous. */
	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
	/* ROM kernel: all init data is gathered into one RAM section whose
	 * load address follows .data in ROM (+32 slack -- NOTE(review):
	 * padding, presumably for cacheline alignment; confirm). */
	.init.data : AT(__data_lma + __data_len + 32)
	{
		__sinitdata = .;
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS

		. = ALIGN(PAGE_SIZE);
		___per_cpu_load = .;
		PERCPU_INPUT(32)

		EXIT_DATA
		__einitdata = .;
	}
	__init_data_lma = LOADADDR(.init.data);
	__init_data_len = SIZEOF(.init.data);
	__init_data_end = .;

	.text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
	/* Body shared by both .text_l1 declarations above. */
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text.head)
		*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
		/* Scheduler runs out of L1 instruction SRAM. */
		SCHED_TEXT
#endif
		. = ALIGN(4);
		__etext_l1 = .;
	}
	__text_l1_lma = LOADADDR(.text_l1);
	__text_l1_len = SIZEOF(.text_l1);
	/* Link-time capacity check against the configured L1 SRAM size. */
	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")

	/* L1 data bank A: data + cacheline-aligned data + bss, all in SRAM. */
	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	__data_l1_lma = LOADADDR(.data_l1);
	__data_l1_len = SIZEOF(.data_l1);
	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

	/* L1 data bank B, chained after bank A in the load image. */
	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	__data_b_l1_lma = LOADADDR(.data_b_l1);
	__data_b_l1_len = SIZEOF(.data_b_l1);
	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

	/* L2 SRAM: text, data and bss share one section. */
	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	__l2_lma = LOADADDR(.text_data_l2);
	__l2_len = SIZEOF(.text_data_l2);
	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
#ifdef CONFIG_RAMKERNEL
	/* RAM kernel: init region ends after the last chained LMA. */
	. = __l2_lma + __l2_len;
#else
	. = __init_data_end;
#endif
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end =.;

	STABS_DEBUG
	DWARF_DEBUG
	DISCARDS
}