  1. /*
  2. * OpenRISC head.S
  3. *
  4. * Linux architectural port borrowing liberally from similar works of
  5. * others. All original copyrights apply as per the original source
  6. * declaration.
  7. *
  8. * Modifications for the OpenRISC architecture:
  9. * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
  10. * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. */
  17. #include <linux/linkage.h>
  18. #include <linux/threads.h>
  19. #include <linux/errno.h>
  20. #include <linux/init.h>
  21. #include <linux/serial_reg.h>
  22. #include <asm/processor.h>
  23. #include <asm/page.h>
  24. #include <asm/mmu.h>
  25. #include <asm/pgtable.h>
  26. #include <asm/thread_info.h>
  27. #include <asm/cache.h>
  28. #include <asm/spr_defs.h>
  29. #include <asm/asm-offsets.h>
  30. #include <linux/of_fdt.h>
  31. #define tophys(rd,rs) \
  32. l.movhi rd,hi(-KERNELBASE) ;\
  33. l.add rd,rd,rs
  34. #define CLEAR_GPR(gpr) \
  35. l.movhi gpr,0x0
  36. #define LOAD_SYMBOL_2_GPR(gpr,symbol) \
  37. l.movhi gpr,hi(symbol) ;\
  38. l.ori gpr,gpr,lo(symbol)
  39. #define UART_BASE_ADD 0x90000000
  40. #define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_SM)
  41. #define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_DCE | SPR_SR_ICE | SPR_SR_IEE | SPR_SR_TEE | SPR_SR_SM)
  42. /* ============================================[ tmp store locations ]=== */
  43. /*
  44. * emergency_print temporary stores
  45. */
  46. #define EMERGENCY_PRINT_STORE_GPR4 l.sw 0x20(r0),r4
  47. #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz r4,0x20(r0)
  48. #define EMERGENCY_PRINT_STORE_GPR5 l.sw 0x24(r0),r5
  49. #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz r5,0x24(r0)
  50. #define EMERGENCY_PRINT_STORE_GPR6 l.sw 0x28(r0),r6
  51. #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz r6,0x28(r0)
  52. #define EMERGENCY_PRINT_STORE_GPR7 l.sw 0x2c(r0),r7
  53. #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz r7,0x2c(r0)
  54. #define EMERGENCY_PRINT_STORE_GPR8 l.sw 0x30(r0),r8
  55. #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz r8,0x30(r0)
  56. #define EMERGENCY_PRINT_STORE_GPR9 l.sw 0x34(r0),r9
  57. #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz r9,0x34(r0)
  58. /*
  59. * TLB miss handlers temorary stores
  60. */
  61. #define EXCEPTION_STORE_GPR9 l.sw 0x10(r0),r9
  62. #define EXCEPTION_LOAD_GPR9 l.lwz r9,0x10(r0)
  63. #define EXCEPTION_STORE_GPR2 l.sw 0x64(r0),r2
  64. #define EXCEPTION_LOAD_GPR2 l.lwz r2,0x64(r0)
  65. #define EXCEPTION_STORE_GPR3 l.sw 0x68(r0),r3
  66. #define EXCEPTION_LOAD_GPR3 l.lwz r3,0x68(r0)
  67. #define EXCEPTION_STORE_GPR4 l.sw 0x6c(r0),r4
  68. #define EXCEPTION_LOAD_GPR4 l.lwz r4,0x6c(r0)
  69. #define EXCEPTION_STORE_GPR5 l.sw 0x70(r0),r5
  70. #define EXCEPTION_LOAD_GPR5 l.lwz r5,0x70(r0)
  71. #define EXCEPTION_STORE_GPR6 l.sw 0x74(r0),r6
  72. #define EXCEPTION_LOAD_GPR6 l.lwz r6,0x74(r0)
  73. /*
  74. * EXCEPTION_HANDLE temporary stores
  75. */
  76. #define EXCEPTION_T_STORE_GPR30 l.sw 0x78(r0),r30
  77. #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz reg,0x78(r0)
  78. #define EXCEPTION_T_STORE_GPR10 l.sw 0x7c(r0),r10
  79. #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz reg,0x7c(r0)
  80. #define EXCEPTION_T_STORE_SP l.sw 0x80(r0),r1
  81. #define EXCEPTION_T_LOAD_SP(reg) l.lwz reg,0x80(r0)
  82. /*
  83. * For UNHANLDED_EXCEPTION
  84. */
  85. #define EXCEPTION_T_STORE_GPR31 l.sw 0x84(r0),r31
  86. #define EXCEPTION_T_LOAD_GPR31(reg) l.lwz reg,0x84(r0)
  87. /* =========================================================[ macros ]=== */
  88. #define GET_CURRENT_PGD(reg,t1) \
  89. LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
  90. tophys (t1,reg) ;\
  91. l.lwz reg,0(t1)
  92. /*
  93. * DSCR: this is a common hook for handling exceptions. it will save
  94. * the needed registers, set up stack and pointer to current
  95. * then jump to the handler while enabling MMU
  96. *
  97. * PRMS: handler - a function to jump to. it has to save the
  98. * remaining registers to kernel stack, call
  99. * appropriate arch-independant exception handler
  100. * and finaly jump to ret_from_except
  101. *
  102. * PREQ: unchanged state from the time exception happened
  103. *
  104. * POST: SAVED the following registers original value
  105. * to the new created exception frame pointed to by r1
  106. *
  107. * r1 - ksp pointing to the new (exception) frame
  108. * r4 - EEAR exception EA
  109. * r10 - current pointing to current_thread_info struct
  110. * r12 - syscall 0, since we didn't come from syscall
  111. * r13 - temp it actually contains new SR, not needed anymore
  112. * r31 - handler address of the handler we'll jump to
  113. *
  114. * handler has to save remaining registers to the exception
  115. * ksp frame *before* tainting them!
  116. *
  117. * NOTE: this function is not reentrant per se. reentrancy is guaranteed
  118. * by processor disabling all exceptions/interrupts when exception
  119. * accours.
  120. *
  121. * OPTM: no need to make it so wasteful to extract ksp when in user mode
  122. */
  123. #define EXCEPTION_HANDLE(handler) \
  124. EXCEPTION_T_STORE_GPR30 ;\
  125. l.mfspr r30,r0,SPR_ESR_BASE ;\
  126. l.andi r30,r30,SPR_SR_SM ;\
  127. l.sfeqi r30,0 ;\
  128. EXCEPTION_T_STORE_GPR10 ;\
  129. l.bnf 2f /* kernel_mode */ ;\
  130. EXCEPTION_T_STORE_SP /* delay slot */ ;\
  131. 1: /* user_mode: */ ;\
  132. LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
  133. tophys (r30,r1) ;\
  134. /* r10: current_thread_info */ ;\
  135. l.lwz r10,0(r30) ;\
  136. tophys (r30,r10) ;\
  137. l.lwz r1,(TI_KSP)(r30) ;\
  138. /* fall through */ ;\
  139. 2: /* kernel_mode: */ ;\
  140. /* create new stack frame, save only needed gprs */ ;\
  141. /* r1: KSP, r10: current, r4: EEAR, r31: __pa(KSP) */ ;\
  142. /* r12: temp, syscall indicator */ ;\
  143. l.addi r1,r1,-(INT_FRAME_SIZE) ;\
  144. /* r1 is KSP, r30 is __pa(KSP) */ ;\
  145. tophys (r30,r1) ;\
  146. l.sw PT_GPR12(r30),r12 ;\
  147. l.mfspr r12,r0,SPR_EPCR_BASE ;\
  148. l.sw PT_PC(r30),r12 ;\
  149. l.mfspr r12,r0,SPR_ESR_BASE ;\
  150. l.sw PT_SR(r30),r12 ;\
  151. /* save r30 */ ;\
  152. EXCEPTION_T_LOAD_GPR30(r12) ;\
  153. l.sw PT_GPR30(r30),r12 ;\
  154. /* save r10 as was prior to exception */ ;\
  155. EXCEPTION_T_LOAD_GPR10(r12) ;\
  156. l.sw PT_GPR10(r30),r12 ;\
  157. /* save PT_SP as was prior to exception */ ;\
  158. EXCEPTION_T_LOAD_SP(r12) ;\
  159. l.sw PT_SP(r30),r12 ;\
  160. /* save exception r4, set r4 = EA */ ;\
  161. l.sw PT_GPR4(r30),r4 ;\
  162. l.mfspr r4,r0,SPR_EEAR_BASE ;\
  163. /* r12 == 1 if we come from syscall */ ;\
  164. CLEAR_GPR(r12) ;\
  165. /* ----- turn on MMU ----- */ ;\
  166. l.ori r30,r0,(EXCEPTION_SR) ;\
  167. l.mtspr r0,r30,SPR_ESR_BASE ;\
  168. /* r30: EA address of handler */ ;\
  169. LOAD_SYMBOL_2_GPR(r30,handler) ;\
  170. l.mtspr r0,r30,SPR_EPCR_BASE ;\
  171. l.rfe
  172. /*
  173. * this doesn't work
  174. *
  175. *
  176. * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
  177. * #define UNHANDLED_EXCEPTION(handler) \
  178. * l.ori r3,r0,0x1 ;\
  179. * l.mtspr r0,r3,SPR_SR ;\
  180. * l.movhi r3,hi(0xf0000100) ;\
  181. * l.ori r3,r3,lo(0xf0000100) ;\
  182. * l.jr r3 ;\
  183. * l.nop 1
  184. *
  185. * #endif
  186. */
  187. /* DSCR: this is the same as EXCEPTION_HANDLE(), we are just
  188. * a bit more carefull (if we have a PT_SP or current pointer
  189. * corruption) and set them up from 'current_set'
  190. *
  191. */
  192. #define UNHANDLED_EXCEPTION(handler) \
  193. EXCEPTION_T_STORE_GPR31 ;\
  194. EXCEPTION_T_STORE_GPR10 ;\
  195. EXCEPTION_T_STORE_SP ;\
  196. /* temporary store r3, r9 into r1, r10 */ ;\
  197. l.addi r1,r3,0x0 ;\
  198. l.addi r10,r9,0x0 ;\
  199. /* the string referenced by r3 must be low enough */ ;\
  200. l.jal _emergency_print ;\
  201. l.ori r3,r0,lo(_string_unhandled_exception) ;\
  202. l.mfspr r3,r0,SPR_NPC ;\
  203. l.jal _emergency_print_nr ;\
  204. l.andi r3,r3,0x1f00 ;\
  205. /* the string referenced by r3 must be low enough */ ;\
  206. l.jal _emergency_print ;\
  207. l.ori r3,r0,lo(_string_epc_prefix) ;\
  208. l.jal _emergency_print_nr ;\
  209. l.mfspr r3,r0,SPR_EPCR_BASE ;\
  210. l.jal _emergency_print ;\
  211. l.ori r3,r0,lo(_string_nl) ;\
  212. /* end of printing */ ;\
  213. l.addi r3,r1,0x0 ;\
  214. l.addi r9,r10,0x0 ;\
  215. /* extract current, ksp from current_set */ ;\
  216. LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_top) ;\
  217. LOAD_SYMBOL_2_GPR(r10,init_thread_union) ;\
  218. /* create new stack frame, save only needed gprs */ ;\
  219. /* r1: KSP, r10: current, r31: __pa(KSP) */ ;\
  220. /* r12: temp, syscall indicator, r13 temp */ ;\
  221. l.addi r1,r1,-(INT_FRAME_SIZE) ;\
  222. /* r1 is KSP, r31 is __pa(KSP) */ ;\
  223. tophys (r31,r1) ;\
  224. l.sw PT_GPR12(r31),r12 ;\
  225. l.mfspr r12,r0,SPR_EPCR_BASE ;\
  226. l.sw PT_PC(r31),r12 ;\
  227. l.mfspr r12,r0,SPR_ESR_BASE ;\
  228. l.sw PT_SR(r31),r12 ;\
  229. /* save r31 */ ;\
  230. EXCEPTION_T_LOAD_GPR31(r12) ;\
  231. l.sw PT_GPR31(r31),r12 ;\
  232. /* save r10 as was prior to exception */ ;\
  233. EXCEPTION_T_LOAD_GPR10(r12) ;\
  234. l.sw PT_GPR10(r31),r12 ;\
  235. /* save PT_SP as was prior to exception */ ;\
  236. EXCEPTION_T_LOAD_SP(r12) ;\
  237. l.sw PT_SP(r31),r12 ;\
  238. l.sw PT_GPR13(r31),r13 ;\
  239. /* --> */ ;\
  240. /* save exception r4, set r4 = EA */ ;\
  241. l.sw PT_GPR4(r31),r4 ;\
  242. l.mfspr r4,r0,SPR_EEAR_BASE ;\
  243. /* r12 == 1 if we come from syscall */ ;\
  244. CLEAR_GPR(r12) ;\
  245. /* ----- play a MMU trick ----- */ ;\
  246. l.ori r31,r0,(EXCEPTION_SR) ;\
  247. l.mtspr r0,r31,SPR_ESR_BASE ;\
  248. /* r31: EA address of handler */ ;\
  249. LOAD_SYMBOL_2_GPR(r31,handler) ;\
  250. l.mtspr r0,r31,SPR_EPCR_BASE ;\
  251. l.rfe
  252. /* =====================================================[ exceptions] === */
  253. /* ---[ 0x100: RESET exception ]----------------------------------------- */
  254. .org 0x100
  255. /* Jump to .init code at _start which lives in the .head section
  256. * and will be discarded after boot.
  257. */
  258. LOAD_SYMBOL_2_GPR(r15, _start)
  259. tophys (r13,r15) /* MMU disabled */
  260. l.jr r13
  261. l.nop
  262. /* ---[ 0x200: BUS exception ]------------------------------------------- */
  263. .org 0x200
  264. _dispatch_bus_fault:
  265. EXCEPTION_HANDLE(_bus_fault_handler)
  266. /* ---[ 0x300: Data Page Fault exception ]------------------------------- */
  267. .org 0x300
  268. _dispatch_do_dpage_fault:
  269. // totaly disable timer interrupt
  270. // l.mtspr r0,r0,SPR_TTMR
  271. // DEBUG_TLB_PROBE(0x300)
  272. // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300)
  273. EXCEPTION_HANDLE(_data_page_fault_handler)
  274. /* ---[ 0x400: Insn Page Fault exception ]------------------------------- */
  275. .org 0x400
  276. _dispatch_do_ipage_fault:
  277. // totaly disable timer interrupt
  278. // l.mtspr r0,r0,SPR_TTMR
  279. // DEBUG_TLB_PROBE(0x400)
  280. // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400)
  281. EXCEPTION_HANDLE(_insn_page_fault_handler)
  282. /* ---[ 0x500: Timer exception ]----------------------------------------- */
  283. .org 0x500
  284. EXCEPTION_HANDLE(_timer_handler)
  285. /* ---[ 0x600: Alignment exception ]-------------------------------------- */
  286. .org 0x600
  287. EXCEPTION_HANDLE(_alignment_handler)
  288. /* ---[ 0x700: Illegal insn exception ]---------------------------------- */
  289. .org 0x700
  290. EXCEPTION_HANDLE(_illegal_instruction_handler)
  291. /* ---[ 0x800: External interrupt exception ]---------------------------- */
  292. .org 0x800
  293. EXCEPTION_HANDLE(_external_irq_handler)
  294. /* ---[ 0x900: DTLB miss exception ]------------------------------------- */
  295. .org 0x900
  296. l.j boot_dtlb_miss_handler
  297. l.nop
  298. /* ---[ 0xa00: ITLB miss exception ]------------------------------------- */
  299. .org 0xa00
  300. l.j boot_itlb_miss_handler
  301. l.nop
  302. /* ---[ 0xb00: Range exception ]----------------------------------------- */
  303. .org 0xb00
  304. UNHANDLED_EXCEPTION(_vector_0xb00)
  305. /* ---[ 0xc00: Syscall exception ]--------------------------------------- */
  306. .org 0xc00
  307. EXCEPTION_HANDLE(_sys_call_handler)
  308. /* ---[ 0xd00: Trap exception ]------------------------------------------ */
  309. .org 0xd00
  310. UNHANDLED_EXCEPTION(_vector_0xd00)
  311. /* ---[ 0xe00: Trap exception ]------------------------------------------ */
  312. .org 0xe00
  313. // UNHANDLED_EXCEPTION(_vector_0xe00)
  314. EXCEPTION_HANDLE(_trap_handler)
  315. /* ---[ 0xf00: Reserved exception ]-------------------------------------- */
  316. .org 0xf00
  317. UNHANDLED_EXCEPTION(_vector_0xf00)
  318. /* ---[ 0x1000: Reserved exception ]------------------------------------- */
  319. .org 0x1000
  320. UNHANDLED_EXCEPTION(_vector_0x1000)
  321. /* ---[ 0x1100: Reserved exception ]------------------------------------- */
  322. .org 0x1100
  323. UNHANDLED_EXCEPTION(_vector_0x1100)
  324. /* ---[ 0x1200: Reserved exception ]------------------------------------- */
  325. .org 0x1200
  326. UNHANDLED_EXCEPTION(_vector_0x1200)
  327. /* ---[ 0x1300: Reserved exception ]------------------------------------- */
  328. .org 0x1300
  329. UNHANDLED_EXCEPTION(_vector_0x1300)
  330. /* ---[ 0x1400: Reserved exception ]------------------------------------- */
  331. .org 0x1400
  332. UNHANDLED_EXCEPTION(_vector_0x1400)
  333. /* ---[ 0x1500: Reserved exception ]------------------------------------- */
  334. .org 0x1500
  335. UNHANDLED_EXCEPTION(_vector_0x1500)
  336. /* ---[ 0x1600: Reserved exception ]------------------------------------- */
  337. .org 0x1600
  338. UNHANDLED_EXCEPTION(_vector_0x1600)
  339. /* ---[ 0x1700: Reserved exception ]------------------------------------- */
  340. .org 0x1700
  341. UNHANDLED_EXCEPTION(_vector_0x1700)
  342. /* ---[ 0x1800: Reserved exception ]------------------------------------- */
  343. .org 0x1800
  344. UNHANDLED_EXCEPTION(_vector_0x1800)
  345. /* ---[ 0x1900: Reserved exception ]------------------------------------- */
  346. .org 0x1900
  347. UNHANDLED_EXCEPTION(_vector_0x1900)
  348. /* ---[ 0x1a00: Reserved exception ]------------------------------------- */
  349. .org 0x1a00
  350. UNHANDLED_EXCEPTION(_vector_0x1a00)
  351. /* ---[ 0x1b00: Reserved exception ]------------------------------------- */
  352. .org 0x1b00
  353. UNHANDLED_EXCEPTION(_vector_0x1b00)
  354. /* ---[ 0x1c00: Reserved exception ]------------------------------------- */
  355. .org 0x1c00
  356. UNHANDLED_EXCEPTION(_vector_0x1c00)
  357. /* ---[ 0x1d00: Reserved exception ]------------------------------------- */
  358. .org 0x1d00
  359. UNHANDLED_EXCEPTION(_vector_0x1d00)
  360. /* ---[ 0x1e00: Reserved exception ]------------------------------------- */
  361. .org 0x1e00
  362. UNHANDLED_EXCEPTION(_vector_0x1e00)
  363. /* ---[ 0x1f00: Reserved exception ]------------------------------------- */
  364. .org 0x1f00
  365. UNHANDLED_EXCEPTION(_vector_0x1f00)
  366. .org 0x2000
  367. /* ===================================================[ kernel start ]=== */
  368. /* .text*/
  369. /* This early stuff belongs in HEAD, but some of the functions below definitely
  370. * don't... */
  371. __HEAD
  372. .global _start
  373. _start:
  374. /* Init r0 to zero as per spec */
  375. CLEAR_GPR(r0)
  376. /* save kernel parameters */
  377. l.or r25,r0,r3 /* pointer to fdt */
  378. /*
  379. * ensure a deterministic start
  380. */
  381. l.ori r3,r0,0x1
  382. l.mtspr r0,r3,SPR_SR
  383. CLEAR_GPR(r1)
  384. CLEAR_GPR(r2)
  385. CLEAR_GPR(r3)
  386. CLEAR_GPR(r4)
  387. CLEAR_GPR(r5)
  388. CLEAR_GPR(r6)
  389. CLEAR_GPR(r7)
  390. CLEAR_GPR(r8)
  391. CLEAR_GPR(r9)
  392. CLEAR_GPR(r10)
  393. CLEAR_GPR(r11)
  394. CLEAR_GPR(r12)
  395. CLEAR_GPR(r13)
  396. CLEAR_GPR(r14)
  397. CLEAR_GPR(r15)
  398. CLEAR_GPR(r16)
  399. CLEAR_GPR(r17)
  400. CLEAR_GPR(r18)
  401. CLEAR_GPR(r19)
  402. CLEAR_GPR(r20)
  403. CLEAR_GPR(r21)
  404. CLEAR_GPR(r22)
  405. CLEAR_GPR(r23)
  406. CLEAR_GPR(r24)
  407. CLEAR_GPR(r26)
  408. CLEAR_GPR(r27)
  409. CLEAR_GPR(r28)
  410. CLEAR_GPR(r29)
  411. CLEAR_GPR(r30)
  412. CLEAR_GPR(r31)
  413. /*
  414. * set up initial ksp and current
  415. */
  416. /* setup kernel stack */
  417. LOAD_SYMBOL_2_GPR(r1,init_thread_union + THREAD_SIZE)
  418. LOAD_SYMBOL_2_GPR(r10,init_thread_union) // setup current
  419. tophys (r31,r10)
  420. l.sw TI_KSP(r31), r1
  421. l.ori r4,r0,0x0
  422. /*
  423. * .data contains initialized data,
  424. * .bss contains uninitialized data - clear it up
  425. */
  426. clear_bss:
  427. LOAD_SYMBOL_2_GPR(r24, __bss_start)
  428. LOAD_SYMBOL_2_GPR(r26, _end)
  429. tophys(r28,r24)
  430. tophys(r30,r26)
  431. CLEAR_GPR(r24)
  432. CLEAR_GPR(r26)
  433. 1:
  434. l.sw (0)(r28),r0
  435. l.sfltu r28,r30
  436. l.bf 1b
  437. l.addi r28,r28,4
  438. enable_ic:
  439. l.jal _ic_enable
  440. l.nop
  441. enable_dc:
  442. l.jal _dc_enable
  443. l.nop
  444. flush_tlb:
  445. l.jal _flush_tlb
  446. l.nop
  447. /* The MMU needs to be enabled before or32_early_setup is called */
  448. enable_mmu:
  449. /*
  450. * enable dmmu & immu
  451. * SR[5] = 0, SR[6] = 0, 6th and 7th bit of SR set to 0
  452. */
  453. l.mfspr r30,r0,SPR_SR
  454. l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
  455. l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
  456. l.or r30,r30,r28
  457. l.mtspr r0,r30,SPR_SR
  458. l.nop
  459. l.nop
  460. l.nop
  461. l.nop
  462. l.nop
  463. l.nop
  464. l.nop
  465. l.nop
  466. l.nop
  467. l.nop
  468. l.nop
  469. l.nop
  470. l.nop
  471. l.nop
  472. l.nop
  473. l.nop
  474. // reset the simulation counters
  475. l.nop 5
  476. /* check fdt header magic word */
  477. l.lwz r3,0(r25) /* load magic from fdt into r3 */
  478. l.movhi r4,hi(OF_DT_HEADER)
  479. l.ori r4,r4,lo(OF_DT_HEADER)
  480. l.sfeq r3,r4
  481. l.bf _fdt_found
  482. l.nop
  483. /* magic number mismatch, set fdt pointer to null */
  484. l.or r25,r0,r0
  485. _fdt_found:
  486. /* pass fdt pointer to or32_early_setup in r3 */
  487. l.or r3,r0,r25
  488. LOAD_SYMBOL_2_GPR(r24, or32_early_setup)
  489. l.jalr r24
  490. l.nop
  491. clear_regs:
  492. /*
  493. * clear all GPRS to increase determinism
  494. */
  495. CLEAR_GPR(r2)
  496. CLEAR_GPR(r3)
  497. CLEAR_GPR(r4)
  498. CLEAR_GPR(r5)
  499. CLEAR_GPR(r6)
  500. CLEAR_GPR(r7)
  501. CLEAR_GPR(r8)
  502. CLEAR_GPR(r9)
  503. CLEAR_GPR(r11)
  504. CLEAR_GPR(r12)
  505. CLEAR_GPR(r13)
  506. CLEAR_GPR(r14)
  507. CLEAR_GPR(r15)
  508. CLEAR_GPR(r16)
  509. CLEAR_GPR(r17)
  510. CLEAR_GPR(r18)
  511. CLEAR_GPR(r19)
  512. CLEAR_GPR(r20)
  513. CLEAR_GPR(r21)
  514. CLEAR_GPR(r22)
  515. CLEAR_GPR(r23)
  516. CLEAR_GPR(r24)
  517. CLEAR_GPR(r25)
  518. CLEAR_GPR(r26)
  519. CLEAR_GPR(r27)
  520. CLEAR_GPR(r28)
  521. CLEAR_GPR(r29)
  522. CLEAR_GPR(r30)
  523. CLEAR_GPR(r31)
  524. jump_start_kernel:
  525. /*
  526. * jump to kernel entry (start_kernel)
  527. */
  528. LOAD_SYMBOL_2_GPR(r30, start_kernel)
  529. l.jr r30
  530. l.nop
  531. _flush_tlb:
  532. /*
  533. * I N V A L I D A T E T L B e n t r i e s
  534. */
  535. LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0))
  536. LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0))
  537. l.addi r7,r0,128 /* Maximum number of sets */
  538. 1:
  539. l.mtspr r5,r0,0x0
  540. l.mtspr r6,r0,0x0
  541. l.addi r5,r5,1
  542. l.addi r6,r6,1
  543. l.sfeq r7,r0
  544. l.bnf 1b
  545. l.addi r7,r7,-1
  546. l.jr r9
  547. l.nop
  548. /* ========================================[ cache ]=== */
  549. /* alignment here so we don't change memory offsets with
  550. * memory controller defined
  551. */
  552. .align 0x2000
  553. _ic_enable:
  554. /* Check if IC present and skip enabling otherwise */
  555. l.mfspr r24,r0,SPR_UPR
  556. l.andi r26,r24,SPR_UPR_ICP
  557. l.sfeq r26,r0
  558. l.bf 9f
  559. l.nop
  560. /* Disable IC */
  561. l.mfspr r6,r0,SPR_SR
  562. l.addi r5,r0,-1
  563. l.xori r5,r5,SPR_SR_ICE
  564. l.and r5,r6,r5
  565. l.mtspr r0,r5,SPR_SR
  566. /* Establish cache block size
  567. If BS=0, 16;
  568. If BS=1, 32;
  569. r14 contain block size
  570. */
  571. l.mfspr r24,r0,SPR_ICCFGR
  572. l.andi r26,r24,SPR_ICCFGR_CBS
  573. l.srli r28,r26,7
  574. l.ori r30,r0,16
  575. l.sll r14,r30,r28
  576. /* Establish number of cache sets
  577. r16 contains number of cache sets
  578. r28 contains log(# of cache sets)
  579. */
  580. l.andi r26,r24,SPR_ICCFGR_NCS
  581. l.srli r28,r26,3
  582. l.ori r30,r0,1
  583. l.sll r16,r30,r28
  584. /* Invalidate IC */
  585. l.addi r6,r0,0
  586. l.sll r5,r14,r28
  587. // l.mul r5,r14,r16
  588. // l.trap 1
  589. // l.addi r5,r0,IC_SIZE
  590. 1:
  591. l.mtspr r0,r6,SPR_ICBIR
  592. l.sfne r6,r5
  593. l.bf 1b
  594. l.add r6,r6,r14
  595. // l.addi r6,r6,IC_LINE
  596. /* Enable IC */
  597. l.mfspr r6,r0,SPR_SR
  598. l.ori r6,r6,SPR_SR_ICE
  599. l.mtspr r0,r6,SPR_SR
  600. l.nop
  601. l.nop
  602. l.nop
  603. l.nop
  604. l.nop
  605. l.nop
  606. l.nop
  607. l.nop
  608. l.nop
  609. l.nop
  610. 9:
  611. l.jr r9
  612. l.nop
  613. _dc_enable:
  614. /* Check if DC present and skip enabling otherwise */
  615. l.mfspr r24,r0,SPR_UPR
  616. l.andi r26,r24,SPR_UPR_DCP
  617. l.sfeq r26,r0
  618. l.bf 9f
  619. l.nop
  620. /* Disable DC */
  621. l.mfspr r6,r0,SPR_SR
  622. l.addi r5,r0,-1
  623. l.xori r5,r5,SPR_SR_DCE
  624. l.and r5,r6,r5
  625. l.mtspr r0,r5,SPR_SR
  626. /* Establish cache block size
  627. If BS=0, 16;
  628. If BS=1, 32;
  629. r14 contain block size
  630. */
  631. l.mfspr r24,r0,SPR_DCCFGR
  632. l.andi r26,r24,SPR_DCCFGR_CBS
  633. l.srli r28,r26,7
  634. l.ori r30,r0,16
  635. l.sll r14,r30,r28
  636. /* Establish number of cache sets
  637. r16 contains number of cache sets
  638. r28 contains log(# of cache sets)
  639. */
  640. l.andi r26,r24,SPR_DCCFGR_NCS
  641. l.srli r28,r26,3
  642. l.ori r30,r0,1
  643. l.sll r16,r30,r28
  644. /* Invalidate DC */
  645. l.addi r6,r0,0
  646. l.sll r5,r14,r28
  647. 1:
  648. l.mtspr r0,r6,SPR_DCBIR
  649. l.sfne r6,r5
  650. l.bf 1b
  651. l.add r6,r6,r14
  652. /* Enable DC */
  653. l.mfspr r6,r0,SPR_SR
  654. l.ori r6,r6,SPR_SR_DCE
  655. l.mtspr r0,r6,SPR_SR
  656. 9:
  657. l.jr r9
  658. l.nop
  659. /* ===============================================[ page table masks ]=== */
  660. #define DTLB_UP_CONVERT_MASK 0x3fa
  661. #define ITLB_UP_CONVERT_MASK 0x3a
  662. /* for SMP we'd have (this is a bit subtle, CC must be always set
  663. * for SMP, but since we have _PAGE_PRESENT bit always defined
  664. * we can just modify the mask)
  665. */
  666. #define DTLB_SMP_CONVERT_MASK 0x3fb
  667. #define ITLB_SMP_CONVERT_MASK 0x3b
  668. /* ---[ boot dtlb miss handler ]----------------------------------------- */
  669. boot_dtlb_miss_handler:
  670. /* mask for DTLB_MR register: - (0) sets V (valid) bit,
  671. * - (31-12) sets bits belonging to VPN (31-12)
  672. */
  673. #define DTLB_MR_MASK 0xfffff001
  674. /* mask for DTLB_TR register: - (2) sets CI (cache inhibit) bit,
  675. * - (4) sets A (access) bit,
  676. * - (5) sets D (dirty) bit,
  677. * - (8) sets SRE (superuser read) bit
  678. * - (9) sets SWE (superuser write) bit
  679. * - (31-12) sets bits belonging to VPN (31-12)
  680. */
  681. #define DTLB_TR_MASK 0xfffff332
  682. /* These are for masking out the VPN/PPN value from the MR/TR registers...
  683. * it's not the same as the PFN */
  684. #define VPN_MASK 0xfffff000
  685. #define PPN_MASK 0xfffff000
  686. EXCEPTION_STORE_GPR6
  687. #if 0
  688. l.mfspr r6,r0,SPR_ESR_BASE //
  689. l.andi r6,r6,SPR_SR_SM // are we in kernel mode ?
  690. l.sfeqi r6,0 // r6 == 0x1 --> SM
  691. l.bf exit_with_no_dtranslation //
  692. l.nop
  693. #endif
  694. /* this could be optimized by moving storing of
  695. * non r6 registers here, and jumping r6 restore
  696. * if not in supervisor mode
  697. */
  698. EXCEPTION_STORE_GPR2
  699. EXCEPTION_STORE_GPR3
  700. EXCEPTION_STORE_GPR4
  701. EXCEPTION_STORE_GPR5
  702. l.mfspr r4,r0,SPR_EEAR_BASE // get the offending EA
  703. immediate_translation:
  704. CLEAR_GPR(r6)
  705. l.srli r3,r4,0xd // r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb)
  706. l.mfspr r6, r0, SPR_DMMUCFGR
  707. l.andi r6, r6, SPR_DMMUCFGR_NTS
  708. l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF
  709. l.ori r5, r0, 0x1
  710. l.sll r5, r5, r6 // r5 = number DMMU sets
  711. l.addi r6, r5, -1 // r6 = nsets mask
  712. l.and r2, r3, r6 // r2 <- r3 % NSETS_MASK
  713. l.or r6,r6,r4 // r6 <- r4
  714. l.ori r6,r6,~(VPN_MASK) // r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
  715. l.movhi r5,hi(DTLB_MR_MASK) // r5 <- ffff:0000.x000
  716. l.ori r5,r5,lo(DTLB_MR_MASK) // r5 <- ffff:1111.x001 - apply DTLB_MR_MASK
  717. l.and r5,r5,r6 // r5 <- VPN :VPN .x001 - we have DTLBMR entry
  718. l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // set DTLBMR
  719. /* set up DTLB with no translation for EA <= 0xbfffffff */
  720. LOAD_SYMBOL_2_GPR(r6,0xbfffffff)
  721. l.sfgeu r6,r4 // flag if r6 >= r4 (if 0xbfffffff >= EA)
  722. l.bf 1f // goto out
  723. l.and r3,r4,r4 // delay slot :: 24 <- r4 (if flag==1)
  724. tophys(r3,r4) // r3 <- PA
  725. 1:
  726. l.ori r3,r3,~(PPN_MASK) // r3 <- PPN :PPN .xfff - clear up lo(r6) to 0x**** *fff
  727. l.movhi r5,hi(DTLB_TR_MASK) // r5 <- ffff:0000.x000
  728. l.ori r5,r5,lo(DTLB_TR_MASK) // r5 <- ffff:1111.x330 - apply DTLB_MR_MASK
  729. l.and r5,r5,r3 // r5 <- PPN :PPN .x330 - we have DTLBTR entry
  730. l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // set DTLBTR
  731. EXCEPTION_LOAD_GPR6
  732. EXCEPTION_LOAD_GPR5
  733. EXCEPTION_LOAD_GPR4
  734. EXCEPTION_LOAD_GPR3
  735. EXCEPTION_LOAD_GPR2
  736. l.rfe // SR <- ESR, PC <- EPC
  737. exit_with_no_dtranslation:
  738. /* EA out of memory or not in supervisor mode */
  739. EXCEPTION_LOAD_GPR6
  740. EXCEPTION_LOAD_GPR4
  741. l.j _dispatch_bus_fault
/* ---[ boot itlb miss handler ]----------------------------------------- */

boot_itlb_miss_handler:

/* mask for ITLB_MR register: - sets V (valid) bit,
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_MR_MASK	0xfffff001

/* mask for ITLB_TR register: - sets A (access) bit,
 *                            - sets SXE (superuser execute) bit
 *                            - sets bits belonging to VPN (15-12)
 */
#define ITLB_TR_MASK	0xfffff050

/*
#define VPN_MASK	0xffffe000
#define PPN_MASK	0xffffe000
*/

	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	EXCEPTION_STORE_GPR5
	EXCEPTION_STORE_GPR6

#if 0
	/* disabled supervisor-mode check (kept for reference, mirrors the
	 * equivalent check in the boot dtlb miss handler) */
	l.mfspr	r6,r0,SPR_ESR_BASE
	l.andi	r6,r6,SPR_SR_SM		// are we in kernel mode ?
	l.sfeqi	r6,0			// r6 == 0x1 --> SM
	l.bf	exit_with_no_itranslation
	l.nop
#endif

	l.mfspr	r4,r0,SPR_EEAR_BASE	// get the offending EA

earlyearly:
	CLEAR_GPR(r6)

	l.srli	r3,r4,0xd		// r3 <- r4 / 8192 (sets are relative to page size (8Kb) NOT VPN size (4Kb))

	/* number of IMMU sets = 1 << IMMUCFGR[NTS] */
	l.mfspr	r6, r0, SPR_IMMUCFGR
	l.andi	r6, r6, SPR_IMMUCFGR_NTS
	l.srli	r6, r6, SPR_IMMUCFGR_NTS_OFF
	l.ori	r5, r0, 0x1
	l.sll	r5, r5, r6		// r5 = number IMMU sets from IMMUCFGR
	l.addi	r6, r5, -1		// r6 = nsets mask
	l.and	r2, r3, r6		// r2 <- r3 % NSETS_MASK (ITLB set index)

	l.or	r6,r6,r4		// r6 <- EA | nsets-mask
	l.ori	r6,r6,~(VPN_MASK)	// r6 <- VPN :VPN .xfff - clear up lo(r6) to 0x**** *fff
	l.movhi	r5,hi(ITLB_MR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_MR_MASK)	// r5 <- ffff:1111.x001 - apply ITLB_MR_MASK
	l.and	r5,r5,r6		// r5 <- VPN :VPN .x001 - we have ITLBMR entry
	l.mtspr	r2,r5,SPR_ITLBMR_BASE(0)	// set ITLBMR

	/*
	 * set up ITLB with no translation for EA <= 0x0fffffff
	 *
	 * we need this for head.S mapping (EA = PA). if we move all functions
	 * which run with mmu enabled into entry.S, we might be able to
	 * eliminate this.
	 */
	LOAD_SYMBOL_2_GPR(r6,0x0fffffff)
	l.sfgeu	r6,r4			// flag if r6 >= r4 (if 0x0fffffff >= EA)
	l.bf	1f			// goto out
	l.and	r3,r4,r4		// delay slot :: r3 <- r4 (identity map when flag==1)

	tophys(r3,r4)			// r3 <- PA (only when EA > 0x0fffffff)
1:
	l.ori	r3,r3,~(PPN_MASK)	// r3 <- PPN :PPN .xfff - clear up lo(r3) to 0x**** *fff
	l.movhi	r5,hi(ITLB_TR_MASK)	// r5 <- ffff:0000.x000
	l.ori	r5,r5,lo(ITLB_TR_MASK)	// r5 <- ffff:1111.x050 - apply ITLB_TR_MASK
	l.and	r5,r5,r3		// r5 <- PPN :PPN .x050 - we have ITLBTR entry
	l.mtspr	r2,r5,SPR_ITLBTR_BASE(0)	// set ITLBTR

	/* restore the scratch registers and return from the exception */
	EXCEPTION_LOAD_GPR6
	EXCEPTION_LOAD_GPR5
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR2
	l.rfe				// SR <- ESR, PC <- EPC

exit_with_no_itranslation:
	/* EA out of memory or not in supervisor mode */
	EXCEPTION_LOAD_GPR4
	EXCEPTION_LOAD_GPR6
	l.j	_dispatch_bus_fault
	l.nop				// delay slot
/* ====================================================================== */
/*
 * Stuff below here shouldn't go into .head section... maybe this stuff
 * can be moved to entry.S ???
 */

/* ==============================================[ DTLB miss handler ]=== */

/*
 * Comments:
 *   Exception handlers are entered with MMU off so the following handler
 *   needs to use physical addressing
 *
 * Walks the two-level page table for the faulting data address and loads
 * the matching DTLB MR/TR pair; bails out to the page-fault path when the
 * pgd entry is empty or the pte is not present.
 */
	.text
ENTRY(dtlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2): r4 = pgd_index(daddr)
	l.slli	r4,r4,0x2		// scale index to a byte offset (<< 2)
	l.add	r3,r4,r3		// r3 = &pgd[pgd_index(daddr)] (virtual)
	/*
	 * if (pmd_none(*pmd))
	 *   goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	d_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot (runs on both paths): r3 = PAGE_MASK

d_pmd_good:
	/*
	 * pte = *pte_offset(pmd, daddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK -> page-table base
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// & ((1UL << (PAGE_SHIFT - 2)) - 1) = pte index
	l.slli	r3,r3,0x2		// scale index to a byte offset (<< 2)
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	d_pte_not_present
	l.addi	r4,r0,0xffffe3fa	// delay slot: PAGE_MASK | DTLB_UP_CONVERT_MASK
	/*
	 * fill DTLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	// Determine number of DMMU sets
	l.mfspr	r2, r0, SPR_DMMUCFGR
	l.andi	r2, r2, SPR_DMMUCFGR_NTS
	l.srli	r2, r2, SPR_DMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number DMMU sets from DMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc set index: & (NUM_TLB_ENTRIES-1)
	l.mtspr	r2,r4,SPR_DTLBTR_BASE(0)
	/*
	 * fill DTLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: DTLB_MR entry
	l.mtspr	r2,r4,SPR_DTLBMR_BASE(0)

	/* restore scratch registers and return */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe

d_pmd_none:
d_pte_not_present:
	/* no mapping available -- take the slow page-fault path */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_dtlb_miss_page_fault_handler)
/* ==============================================[ ITLB miss handler ]=== */

/*
 * Instruction-TLB twin of dtlb_miss_handler above: same page-table walk,
 * same physical addressing constraint, but fills the ITLB MR/TR pair.
 */
ENTRY(itlb_miss_handler)
	EXCEPTION_STORE_GPR2
	EXCEPTION_STORE_GPR3
	EXCEPTION_STORE_GPR4
	/*
	 * get EA of the miss
	 */
	l.mfspr	r2,r0,SPR_EEAR_BASE
	/*
	 * pmd = (pmd_t *)(current_pgd + pgd_index(daddr));
	 */
	GET_CURRENT_PGD(r3,r4)		// r3 is current_pgd, r4 is temp
	l.srli	r4,r2,0x18		// >> PAGE_SHIFT + (PAGE_SHIFT - 2): r4 = pgd_index(daddr)
	l.slli	r4,r4,0x2		// scale index to a byte offset (<< 2)
	l.add	r3,r4,r3		// r3 = &pgd[pgd_index(daddr)] (virtual)
	/*
	 * if (pmd_none(*pmd))
	 *   goto pmd_none:
	 */
	tophys	(r4,r3)
	l.lwz	r3,0x0(r4)		// get *pmd value
	l.sfne	r3,r0
	l.bnf	i_pmd_none
	l.addi	r3,r0,0xffffe000	// delay slot (runs on both paths): r3 = PAGE_MASK

i_pmd_good:
	/*
	 * pte = *pte_offset(pmd, iaddr);
	 */
	l.lwz	r4,0x0(r4)		// get **pmd value
	l.and	r4,r4,r3		// & PAGE_MASK -> page-table base
	l.srli	r2,r2,0xd		// >> PAGE_SHIFT, r2 == EEAR
	l.andi	r3,r2,0x7ff		// & ((1UL << (PAGE_SHIFT - 2)) - 1) = pte index
	l.slli	r3,r3,0x2		// scale index to a byte offset (<< 2)
	l.add	r3,r3,r4
	l.lwz	r3,0x0(r3)		// this is pte at last
	/*
	 * if (!pte_present(pte))
	 */
	l.andi	r4,r3,0x1
	l.sfne	r4,r0			// is pte present
	l.bnf	i_pte_not_present
	l.addi	r4,r0,0xffffe03a	// delay slot: PAGE_MASK | ITLB_UP_CONVERT_MASK
	/*
	 * fill ITLB TR register
	 */
	l.and	r4,r3,r4		// apply the mask
	l.andi	r3,r3,0x7c0		// _PAGE_EXEC | _PAGE_SRE | _PAGE_SWE | _PAGE_URE | _PAGE_UWE
	l.sfeq	r3,r0
	l.bf	itlb_tr_fill		//_workaround
	// NOTE(review): when the branch above is taken, the delay slot below
	// still executes and overwrites r2 (intended TLB set index) with the
	// raw IMMUCFGR value before the l.mtspr at itlb_tr_fill -- confirm
	// whether the no-permission path is ever exercised.
	// Determine number of IMMU sets
	l.mfspr	r2, r0, SPR_IMMUCFGR
	l.andi	r2, r2, SPR_IMMUCFGR_NTS
	l.srli	r2, r2, SPR_IMMUCFGR_NTS_OFF
	l.ori	r3, r0, 0x1
	l.sll	r3, r3, r2		// r3 = number IMMU sets from IMMUCFGR
	l.addi	r2, r3, -1		// r2 = nsets mask
	l.mfspr	r3, r0, SPR_EEAR_BASE
	l.srli	r3, r3, 0xd		// >> PAGE_SHIFT
	l.and	r2, r3, r2		// calc set index: & (NUM_TLB_ENTRIES-1)

	/*
	 * __PHX__ :: fixme
	 * we should not just blindly set executable flags,
	 * but it does help with ping. the clean way would be to find out
	 * (and fix it) why stack doesn't have execution permissions
	 */

itlb_tr_fill_workaround:
	l.ori	r4,r4,0xc0		// | (SPR_ITLBTR_UXE | ITLBTR_SXE)
itlb_tr_fill:
	l.mtspr	r2,r4,SPR_ITLBTR_BASE(0)
	/*
	 * fill ITLB MR register
	 */
	l.slli	r3, r3, 0xd		/* << PAGE_SHIFT => EA & PAGE_MASK */
	l.ori	r4,r3,0x1		// set hardware valid bit: ITLB_MR entry
	l.mtspr	r2,r4,SPR_ITLBMR_BASE(0)

	/* restore scratch registers and return */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	l.rfe

i_pmd_none:
i_pte_not_present:
	/* no mapping available -- take the slow page-fault path */
	EXCEPTION_LOAD_GPR2
	EXCEPTION_LOAD_GPR3
	EXCEPTION_LOAD_GPR4
	EXCEPTION_HANDLE(_itlb_miss_page_fault_handler)
/* ==============================================[ boot tlb handlers ]=== */

/* =================================================[ debugging aids ]=== */

	/* 64-byte scratch area that _immu_trampoline_workaround fills with a
	 * six-instruction trampoline; execution is redirected here via EPCR.
	 */
	.align 64
_immu_trampoline:
	.space 64
_immu_trampoline_top:

/* byte offsets of the six instruction slots inside _immu_trampoline */
#define TRAMP_SLOT_0		(0x0)
#define TRAMP_SLOT_1		(0x4)
#define TRAMP_SLOT_2		(0x8)
#define TRAMP_SLOT_3		(0xc)
#define TRAMP_SLOT_4		(0x10)
#define TRAMP_SLOT_5		(0x14)
#define TRAMP_FRAME_SIZE	(0x18)
/*
 * Copies the two instructions around the faulting address into
 * _immu_trampoline, patching any PC-relative jump/branch among them so it
 * still reaches its original target, then points EPCR at the trampoline
 * and invalidates the affected icache lines.
 *
 * In:   r2 = EEA, r9 = return address
 * Out:  EPCR set to _immu_trampoline
 * Clobbers: r3-r6 plus r14/r21/r23 (see NOTE at trampoline_out)
 */
ENTRY(_immu_trampoline_workaround)
	// r2 EEA
	// r6 is physical EEA
	tophys(r6,r2)

	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	tophys	(r3,r5)			// r3 is trampoline (physical)

	/* pre-fill the outer slots with l.nop (0x15000000) */
	LOAD_SYMBOL_2_GPR(r4,0x15000000)
	l.sw	TRAMP_SLOT_0(r3),r4
	l.sw	TRAMP_SLOT_1(r3),r4
	l.sw	TRAMP_SLOT_4(r3),r4
	l.sw	TRAMP_SLOT_5(r3),r4

	// EPC = EEA - 0x4
	l.lwz	r4,0x0(r6)		// load op @ EEA + 0x0 (fc address)
	l.sw	TRAMP_SLOT_3(r3),r4	// store it to _immu_trampoline_data
	l.lwz	r4,-0x4(r6)		// load op @ EEA - 0x4 (f8 address)
	l.sw	TRAMP_SLOT_2(r3),r4	// store it to _immu_trampoline_data

	/* dispatch on the opcode (top 6 bits) of the instruction at EEA-4 */
	l.srli	r5,r4,26		// check opcode for write access
	l.sfeqi	r5,0			// l.j
	l.bf	0f
	l.sfeqi	r5,0x11			// l.jr
	l.bf	1f
	l.sfeqi	r5,1			// l.jal
	l.bf	2f
	l.sfeqi	r5,0x12			// l.jalr
	l.bf	3f
	l.sfeqi	r5,3			// l.bnf
	l.bf	4f
	l.sfeqi	r5,4			// l.bf
	l.bf	5f
99:
	l.nop
	l.j	99b			// should never happen
	l.nop	1

	// register roles in the cases below:
	// r2 is EEA
	// r3 is trampoline address (physical)
	// r4 is instruction
	// r6 is physical(EEA)
	//
	// r5 is scratch

2:	// l.jal
	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */
	l.addi	r6,r2,0x4		// this is 0xaaaabbbb (link value)

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* fallthrough, need to set up new jump offset */

0:	// l.j
	l.slli	r6,r4,6		// original offset shifted left 6 - 2
//	l.srli	r6,r6,6		// original offset shifted right 2

	l.slli	r4,r2,4		// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6		// old jump position: shifted right 2

	l.addi	r5,r3,0xc	// new jump position (physical)
	l.slli	r5,r5,4		// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)
	l.sub	r5,r4,r5	// old_jump - new_jump
	l.add	r5,r6,r5	// orig_off + (old_jump - new_jump)
	l.srli	r5,r5,6		// new offset shifted right 2

	// r5 is new jump offset
	// l.j has opcode 0x0..., so the shifted offset is the whole word
	l.sw	TRAMP_SLOT_2(r3),r5	// write it back

	l.j	trampoline_out
	l.nop

/* ----------------------------- */

3:	// l.jalr
	/* 19 20 aa aa	l.movhi r9,0xaaaa
	 * a9 29 bb bb	l.ori	r9,0xbbbb
	 *
	 * where 0xaaaabbbb is EEA + 0x4 shifted right 2
	 */
	l.addi	r6,r2,0x4		// this is 0xaaaabbbb (link value)

	// l.movhi r9,0xaaaa
	l.ori	r5,r0,0x1920		// 0x1920 == l.movhi r9
	l.sh	(TRAMP_SLOT_0+0x0)(r3),r5
	l.srli	r5,r6,16
	l.sh	(TRAMP_SLOT_0+0x2)(r3),r5

	// l.ori r9,0xbbbb
	l.ori	r5,r0,0xa929		// 0xa929 == l.ori r9
	l.sh	(TRAMP_SLOT_1+0x0)(r3),r5
	l.andi	r5,r6,0xffff
	l.sh	(TRAMP_SLOT_1+0x2)(r3),r5

	/* linking was emulated above, so demote the copied l.jalr to l.jr */
	l.lhz	r5,(TRAMP_SLOT_2+0x0)(r3)	// load hi part of jump instruction
	l.andi	r5,r5,0x3ff			// clear out opcode part
	l.ori	r5,r5,0x4400			// opcode changed from l.jalr -> l.jr
	l.sh	(TRAMP_SLOT_2+0x0)(r3),r5	// write it back

	/* fallthrough */
1:	// l.jr
	l.j	trampoline_out
	l.nop

/* ----------------------------- */

4:	// l.bnf
5:	// l.bf
	l.slli	r6,r4,6		// original offset shifted left 6 - 2
//	l.srli	r6,r6,6		// original offset shifted right 2

	l.slli	r4,r2,4		// old jump position: EEA shifted left 4
//	l.srli	r4,r4,6		// old jump position: shifted right 2

	l.addi	r5,r3,0xc	// new jump position (physical)
	l.slli	r5,r5,4		// new jump position: shifted left 4

	// calculate new jump offset
	// new_off = old_off + (old_jump - new_jump)
	l.add	r6,r6,r4	// (orig_off + old_jump)
	l.sub	r6,r6,r5	// (orig_off + old_jump) - new_jump
	l.srli	r6,r6,6		// new offset shifted right 2

	// r6 is new jump offset
	l.lwz	r4,(TRAMP_SLOT_2+0x0)(r3)	// load jump instruction
	l.srli	r4,r4,16
	l.andi	r4,r4,0xfc00			// get opcode part
	l.slli	r4,r4,16
	l.or	r6,r4,r6			// l.b(n)f new offset
	l.sw	TRAMP_SLOT_2(r3),r6		// write it back

	/* we need to add l.j to EEA + 0x8 (the not-taken continuation) */
	tophys	(r4,r2)			// may not be needed (due to shifts down)
	l.addi	r4,r4,(0x8 - 0x8)	// jump target = r2 + 0x8 (compensate for 0x8)
					// jump position = r5 + 0x8 (0x8 compensated)
	l.sub	r4,r4,r5	// jump offset = target - new_position + 0x8

	l.slli	r4,r4,4		// the amount of info in immediate of jump
	l.srli	r4,r4,6		// jump instruction with offset
	l.sw	TRAMP_SLOT_4(r3),r4	// write it to 4th slot

	/* fallthrough */

trampoline_out:
	// set up new EPC to point to our trampoline code
	LOAD_SYMBOL_2_GPR(r5,_immu_trampoline)
	l.mtspr	r0,r5,SPR_EPCR_BASE

	// immu_trampoline is (4x) CACHE_LINE aligned
	// and only 6 instructions long,
	// so we need to invalidate only 2 lines

	/* Establish cache block size
	   If BS=0, 16;
	   If BS=1, 32;
	   r14 contains block size
	*/
	// NOTE(review): r14, r21 and r23 are clobbered here without being
	// saved/restored -- confirm all callers treat them as scratch.
	l.mfspr	r21,r0,SPR_ICCFGR
	l.andi	r21,r21,SPR_ICCFGR_CBS
	l.srli	r21,r21,7
	l.ori	r23,r0,16
	l.sll	r14,r23,r21

	l.mtspr	r0,r5,SPR_ICBIR
	l.add	r5,r5,r14
	l.mtspr	r0,r5,SPR_ICBIR

	l.jr	r9
	l.nop
/*
 * DSCR: prints a string referenced by r3.
 *
 * PRMS: r3 - address of the first character of null
 *            terminated string to be printed
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 *       (r4-r7 are preserved via the EMERGENCY_PRINT_* macros)
 */
ENTRY(_emergency_print)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7

2:
	l.lbz	r7,0(r3)		// r7 = next character
	l.sfeq	r7,r0			// NUL terminator?
	l.bf	9f			// yes -> restore and return
	l.nop

	// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	/* busy-wait for transmit holding register empty
	 * (16550-style LSR at offset 5, bit 0x20) */
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write character to THR (offset 0)

	/* busy-wait until transmitter fully idle (LSR bits 0x60) */
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r3,r3,0x1		// delay slot: advance string pointer

9:
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4

	l.jr	r9
	l.nop
/*
 * DSCR: prints the value in r3 as hexadecimal (leading zeros skipped,
 *       but a lone 0 is still printed).
 *
 * PRMS: r3 - 32-bit value to print
 *
 * PREQ: UART at UART_BASE_ADD has to be initialized
 *
 * POST: caller should be aware that r3, r9 are changed
 *       (r4-r8 are preserved via the EMERGENCY_PRINT_* macros)
 */
ENTRY(_emergency_print_nr)
	EMERGENCY_PRINT_STORE_GPR4
	EMERGENCY_PRINT_STORE_GPR5
	EMERGENCY_PRINT_STORE_GPR6
	EMERGENCY_PRINT_STORE_GPR7
	EMERGENCY_PRINT_STORE_GPR8

	l.addi	r8,r0,32		// shift register (bits left to process)

1:	/* remove leading zeros */
	l.addi	r8,r8,-0x4
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// current nibble

	/* don't skip the last zero if number == 0x0 */
	l.sfeqi	r8,0x4
	l.bf	2f
	l.nop

	l.sfeq	r7,r0
	l.bf	1b
	l.nop

2:
	l.srl	r7,r3,r8
	l.andi	r7,r7,0xf		// nibble to print

	l.sflts	r8,r0			// all nibbles consumed?
	l.bf	9f
	l.sfgtui r7,0x9			// delay slot; flag reused below, unused after exit

	l.bnf	8f
	l.nop
	l.addi	r7,r7,0x27		// +0x27 then +0x30 -> 'a'..'f'

8:
	l.addi	r7,r7,0x30		// digit -> ASCII ('0'..'9')

	// putc:
	l.movhi	r4,hi(UART_BASE_ADD)

	/* busy-wait for transmit holding register empty (LSR bit 0x20) */
	l.addi	r6,r0,0x20
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x20
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	l.sb	0(r4),r7		// write character to THR

	/* busy-wait until transmitter fully idle (LSR bits 0x60) */
	l.addi	r6,r0,0x60
1:	l.lbz	r5,5(r4)
	l.andi	r5,r5,0x60
	l.sfeq	r5,r6
	l.bnf	1b
	l.nop

	/* next character */
	l.j	2b
	l.addi	r8,r8,-0x4		// delay slot: step to next nibble

9:
	EMERGENCY_PRINT_LOAD_GPR8
	EMERGENCY_PRINT_LOAD_GPR7
	EMERGENCY_PRINT_LOAD_GPR6
	EMERGENCY_PRINT_LOAD_GPR5
	EMERGENCY_PRINT_LOAD_GPR4

	l.jr	r9
	l.nop
/*
 * This should be used for debugging only.
 * It messes up the Linux early serial output
 * somehow, so use it sparingly and essentially
 * only if you need to debug something that goes wrong
 * before Linux gets the early serial going.
 *
 * Furthermore, you'll have to make sure you set the
 * UART_DIVISOR correctly according to the system
 * clock rate.
 */

#define SYS_CLK			20000000
//#define SYS_CLK		 1843200

#define OR32_CONSOLE_BAUD	115200
#define UART_DIVISOR		SYS_CLK/(16*OR32_CONSOLE_BAUD)

/*
 * DSCR: initializes the 16550-style UART at UART_BASE_ADD:
 *       FIFOs enabled and reset, interrupts off, 8n1 framing,
 *       baud divisor programmed from UART_DIVISOR.
 *
 * POST: clobbers r3, r4, r5; returns via r9
 */
ENTRY(_early_uart_init)
	l.movhi	r3,hi(UART_BASE_ADD)

	l.addi	r4,r0,0x7		// FCR (offset 2): enable + reset FIFOs
	l.sb	0x2(r3),r4

	l.addi	r4,r0,0x0		// IER (offset 1): disable all interrupts
	l.sb	0x1(r3),r4

	l.addi	r4,r0,0x3		// LCR (offset 3): 8 data bits, no parity, 1 stop
	l.sb	0x3(r3),r4

	l.lbz	r5,3(r3)		// save current LCR
	l.ori	r4,r5,0x80		// set DLAB to expose the divisor latch
	l.sb	0x3(r3),r4
	l.addi	r4,r0,((UART_DIVISOR>>8) & 0x000000ff)
	l.sb	UART_DLM(r3),r4		// divisor high byte
	l.addi	r4,r0,((UART_DIVISOR) & 0x000000ff)
	l.sb	UART_DLL(r3),r4		// divisor low byte
	l.sb	0x3(r3),r5		// restore LCR (clears DLAB)

	l.jr	r9
	l.nop
	.section .rodata

/* message fragments used when reporting an unhandled exception */
_string_unhandled_exception:
	.string "\n\rRunarunaround: Unhandled exception 0x\0"

_string_epc_prefix:
	.string ": EPC=0x\0"

_string_nl:
	.string "\n\r\0"
/* ========================================[ page aligned structures ]=== */

/*
 * .data section should be page aligned
 *	(look into arch/or32/kernel/vmlinux.lds)
 */
	.section .data,"aw"
	.align	8192

	/* one page (8 KiB) of zeros, exported as empty_zero_page */
	.global  empty_zero_page
empty_zero_page:
	.space  8192

	/* page-sized storage for the kernel's initial page directory */
	.global  swapper_pg_dir
swapper_pg_dir:
	.space  8192

	/* page-sized stack used while reporting unhandled exceptions */
	.global	_unhandled_stack
_unhandled_stack:
	.space	8192
_unhandled_stack_top:
  1315. /* ============================================================[ EOF ]=== */