/* arch/microblaze/kernel/entry.S — low-level syscall, trap and interrupt entry */
  1. /*
  2. * Low-level system-call handling, trap handlers and context-switching
  3. *
  4. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2008-2009 PetaLogix
  6. * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
  7. * Copyright (C) 2001,2002 NEC Corporation
  8. * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
  9. *
  10. * This file is subject to the terms and conditions of the GNU General
  11. * Public License. See the file COPYING in the main directory of this
  12. * archive for more details.
  13. *
  14. * Written by Miles Bader <miles@gnu.org>
  15. * Heavily modified by John Williams for Microblaze
  16. */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
  29. #undef DEBUG
/*
 * Optional DEBUG syscall-counting table, the C_ENTRY() label helper, and
 * the MSR bit-manipulation macros used by every entry path below.
 *
 * Two macro variants are provided:
 *  - When CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR is set, the CPU has the
 *    msrset/msrclr instructions; each op is a single instruction writing
 *    the old MSR to r0 (i.e. discarding it) and clobbering no GPR.
 *  - Otherwise each op is a read-modify-write through r11 (mfs/alu/mts),
 *    so r11 is CLOBBERED by every macro in the fallback path.  Callers
 *    such as unaligned_data_trap explicitly save r11 around these.
 */
  30. #ifdef DEBUG
  31. /* Create space for syscalls counting. */
  32. .section .data
  33. .global syscall_debug_table
  34. .align 4
  35. syscall_debug_table:
/* One 32-bit counter per syscall; slot 0 doubles as the total count. */
  36. .space (__NR_syscalls * 4)
  37. #endif /* DEBUG */
/* C_ENTRY(name): emit a global, 4-byte-aligned entry label. */
  38. #define C_ENTRY(name) .globl name; .align 4; name
  39. /*
  40. * Various ways of setting and clearing BIP in flags reg.
  41. * This is mucky, but necessary using microblaze version that
  42. * allows msr ops to write to BIP
  43. */
/* Variant 1: single-instruction msrset/msrclr, r0 destination, no GPR clobber. */
  44. #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
  45. .macro clear_bip
  46. msrclr r0, MSR_BIP
  47. .endm
  48. .macro set_bip
  49. msrset r0, MSR_BIP
  50. .endm
  51. .macro clear_eip
  52. msrclr r0, MSR_EIP
  53. .endm
  54. .macro set_ee
  55. msrset r0, MSR_EE
  56. .endm
  57. .macro disable_irq
  58. msrclr r0, MSR_IE
  59. .endm
  60. .macro enable_irq
  61. msrset r0, MSR_IE
  62. .endm
/* NOTE(review): this set_ums sets UMS and clears VMS, while the fallback
 * set_ums below sets VMS and clears UMS (identical to set_vms).  The two
 * variants disagree — confirm against upstream before relying on either. */
  63. .macro set_ums
  64. msrset r0, MSR_UMS
  65. msrclr r0, MSR_VMS
  66. .endm
  67. .macro set_vms
  68. msrclr r0, MSR_UMS
  69. msrset r0, MSR_VMS
  70. .endm
  71. .macro clear_ums
  72. msrclr r0, MSR_UMS
  73. .endm
  74. .macro clear_vms_ums
  75. msrclr r0, MSR_VMS | MSR_UMS
  76. .endm
/* Variant 2: no msrset/msrclr available — RMW through r11 (clobbers r11). */
  77. #else
  78. .macro clear_bip
  79. mfs r11, rmsr
  80. andi r11, r11, ~MSR_BIP
  81. mts rmsr, r11
  82. .endm
  83. .macro set_bip
  84. mfs r11, rmsr
  85. ori r11, r11, MSR_BIP
  86. mts rmsr, r11
  87. .endm
  88. .macro clear_eip
  89. mfs r11, rmsr
  90. andi r11, r11, ~MSR_EIP
  91. mts rmsr, r11
  92. .endm
  93. .macro set_ee
  94. mfs r11, rmsr
  95. ori r11, r11, MSR_EE
  96. mts rmsr, r11
  97. .endm
  98. .macro disable_irq
  99. mfs r11, rmsr
  100. andi r11, r11, ~MSR_IE
  101. mts rmsr, r11
  102. .endm
  103. .macro enable_irq
  104. mfs r11, rmsr
  105. ori r11, r11, MSR_IE
  106. mts rmsr, r11
  107. .endm
/* NOTE(review): fallback set_ums is byte-identical to fallback set_vms
 * (sets VMS, clears UMS) — see the discrepancy note on variant 1 above. */
  108. .macro set_ums
  109. mfs r11, rmsr
  110. ori r11, r11, MSR_VMS
  111. andni r11, r11, MSR_UMS
  112. mts rmsr, r11
  113. .endm
  114. .macro set_vms
  115. mfs r11, rmsr
  116. ori r11, r11, MSR_VMS
  117. andni r11, r11, MSR_UMS
  118. mts rmsr, r11
  119. .endm
  120. .macro clear_ums
  121. mfs r11, rmsr
  122. andni r11, r11, MSR_UMS
  123. mts rmsr,r11
  124. .endm
  125. .macro clear_vms_ums
  126. mfs r11, rmsr
  127. andni r11, r11, (MSR_VMS|MSR_UMS)
  128. mts rmsr,r11
  129. .endm
  130. #endif
  131. /* Define how to call high-level functions. With MMU, virtual mode must be
  132. * enabled when calling the high-level function. Clobbers R11.
  133. * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
  134. */
/* VM_ON: enter virtual/user-save mode via rted to the local 2: label.
 * The nop fills the rted delay slot. */
  135. /* turn on virtual protected mode save */
  136. #define VM_ON \
  137. set_ums; \
  138. rted r0, 2f; \
  139. nop; \
  140. 2:
/* VM_OFF: leave virtual mode; the rted target must be a PHYSICAL address,
 * hence TOPHYS(1f). */
  141. /* turn off virtual protected mode save and user mode save*/
  142. #define VM_OFF \
  143. clear_vms_ums; \
  144. rted r0, TOPHYS(1f); \
  145. nop; \
  146. 1:
/* SAVE_REGS: store r2-r18 and r20-r31 plus MSR into the pt_regs frame at
 * r1.  r14 (exception return PC) is stored as PT_PC.  r1 and r19 are not
 * saved here; MSR goes through r11, so r11's pt_regs slot is written
 * BEFORE r11 is reused as MSR scratch. */
  147. #define SAVE_REGS \
  148. swi r2, r1, PT_R2; /* Save SDA */ \
  149. swi r3, r1, PT_R3; \
  150. swi r4, r1, PT_R4; \
  151. swi r5, r1, PT_R5; \
  152. swi r6, r1, PT_R6; \
  153. swi r7, r1, PT_R7; \
  154. swi r8, r1, PT_R8; \
  155. swi r9, r1, PT_R9; \
  156. swi r10, r1, PT_R10; \
  157. swi r11, r1, PT_R11; /* save clobbered regs after rval */\
  158. swi r12, r1, PT_R12; \
  159. swi r13, r1, PT_R13; /* Save SDA2 */ \
  160. swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \
  161. swi r15, r1, PT_R15; /* Save LP */ \
  162. swi r16, r1, PT_R16; \
  163. swi r17, r1, PT_R17; \
  164. swi r18, r1, PT_R18; /* Save asm scratch reg */ \
  165. swi r19, r1, PT_R19; \
  166. swi r20, r1, PT_R20; \
  167. swi r21, r1, PT_R21; \
  168. swi r22, r1, PT_R22; \
  169. swi r23, r1, PT_R23; \
  170. swi r24, r1, PT_R24; \
  171. swi r25, r1, PT_R25; \
  172. swi r26, r1, PT_R26; \
  173. swi r27, r1, PT_R27; \
  174. swi r28, r1, PT_R28; \
  175. swi r29, r1, PT_R29; \
  176. swi r30, r1, PT_R30; \
  177. swi r31, r1, PT_R31; /* Save current task reg */ \
  178. mfs r11, rmsr; /* save MSR */ \
  179. swi r11, r1, PT_MSR;
/* RESTORE_REGS_GP: inverse of SAVE_REGS for the GPRs only (no MSR). */
  180. #define RESTORE_REGS_GP \
  181. lwi r2, r1, PT_R2; /* restore SDA */ \
  182. lwi r3, r1, PT_R3; \
  183. lwi r4, r1, PT_R4; \
  184. lwi r5, r1, PT_R5; \
  185. lwi r6, r1, PT_R6; \
  186. lwi r7, r1, PT_R7; \
  187. lwi r8, r1, PT_R8; \
  188. lwi r9, r1, PT_R9; \
  189. lwi r10, r1, PT_R10; \
  190. lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\
  191. lwi r12, r1, PT_R12; \
  192. lwi r13, r1, PT_R13; /* restore SDA2 */ \
  193. lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
  194. lwi r15, r1, PT_R15; /* restore LP */ \
  195. lwi r16, r1, PT_R16; \
  196. lwi r17, r1, PT_R17; \
  197. lwi r18, r1, PT_R18; /* restore asm scratch reg */ \
  198. lwi r19, r1, PT_R19; \
  199. lwi r20, r1, PT_R20; \
  200. lwi r21, r1, PT_R21; \
  201. lwi r22, r1, PT_R22; \
  202. lwi r23, r1, PT_R23; \
  203. lwi r24, r1, PT_R24; \
  204. lwi r25, r1, PT_R25; \
  205. lwi r26, r1, PT_R26; \
  206. lwi r27, r1, PT_R27; \
  207. lwi r28, r1, PT_R28; \
  208. lwi r29, r1, PT_R29; \
  209. lwi r30, r1, PT_R30; \
  210. lwi r31, r1, PT_R31; /* Restore cur task reg */
/* RESTORE_REGS: restore saved MSR verbatim, then all GPRs.  MSR must be
 * written BEFORE r11 is reloaded, since r11 is the scratch register. */
  211. #define RESTORE_REGS \
  212. lwi r11, r1, PT_MSR; \
  213. mts rmsr , r11; \
  214. RESTORE_REGS_GP
/* RESTORE_REGS_RTBD: like RESTORE_REGS but massages MSR for a return via
 * rtbd — clears EIP and sets EE|BIP so exceptions are re-enabled and
 * break-in-progress state matches the rtbd return. */
  215. #define RESTORE_REGS_RTBD \
  216. lwi r11, r1, PT_MSR; \
  217. andni r11, r11, MSR_EIP; /* clear EIP */ \
  218. ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \
  219. mts rmsr , r11; \
  220. RESTORE_REGS_GP
/* SAVE_STATE: full exception-entry state save, usable from either kernel
 * or user mode.  Decides mode from MSR_UMS, builds a pt_regs frame on the
 * correct (physical-addressed) kernel stack, records PT_MODE (non-zero =
 * was in kernel) and, for user entry, the user SP in PT_R1.  Ends with
 * CURRENT_TASK (r31) reloaded from the per-CPU save slot.  Note r1 is
 * briefly used as MSR scratch before being reloaded. */
  221. #define SAVE_STATE \
  222. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
  223. /* See if already in kernel mode.*/ \
  224. mfs r1, rmsr; \
  225. andi r1, r1, MSR_UMS; \
  226. bnei r1, 1f; \
  227. /* Kernel-mode state save. */ \
  228. /* Reload kernel stack-ptr. */ \
  229. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  230. /* FIXME: I can add these two lines to one */ \
  231. /* tophys(r1,r1); */ \
  232. /* addik r1, r1, -PT_SIZE; */ \
  233. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
  234. SAVE_REGS \
  235. brid 2f; \
  236. swi r1, r1, PT_MODE; \
  237. 1: /* User-mode state save. */ \
  238. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
  239. tophys(r1,r1); \
  240. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
  241. /* MS these three instructions can be added to one */ \
  242. /* addik r1, r1, THREAD_SIZE; */ \
  243. /* tophys(r1,r1); */ \
  244. /* addik r1, r1, -PT_SIZE; */ \
  245. addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
  246. SAVE_REGS \
  247. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  248. swi r11, r1, PT_R1; /* Store user SP. */ \
  249. swi r0, r1, PT_MODE; /* Was in user-mode. */ \
  250. /* MS: I am clearing UMS even in case when I come from kernel space */ \
  251. clear_ums; \
  252. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  253. .text
  254. /*
  255. * User trap.
  256. *
  257. * System calls are handled here.
  258. *
  259. * Syscall protocol:
  260. * Syscall number in r12, args in r5-r10
  261. * Return value in r3
  262. *
  263. * Trap entered via brki instruction, so BIP bit is set, and interrupts
  264. * are masked. This is nice, means we don't have to CLI before state save
  265. */
/* Entered in physical mode (MMU translation off), from user mode only:
 * build a pt_regs frame on the task's kernel stack, switch to virtual
 * mode, optionally do syscall-entry tracing, then dispatch via
 * sys_call_table with the return path aimed at ret_from_trap. */
  266. C_ENTRY(_user_exception):
  267. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
/* r14 holds the brki return address; step past the trap instruction. */
  268. addi r14, r14, 4 /* return address is 4 byte after call */
  269. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  270. tophys(r1,r1);
  271. lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
  272. /* calculate kernel stack pointer from task struct 8k */
  273. addik r1, r1, THREAD_SIZE;
  274. tophys(r1,r1);
  275. addik r1, r1, -PT_SIZE; /* Make room on the stack. */
  276. SAVE_REGS
/* Pre-zero the return-value slots so a traced/aborted syscall reads 0. */
  277. swi r0, r1, PT_R3
  278. swi r0, r1, PT_R4
  279. swi r0, r1, PT_MODE; /* Was in user-mode. */
  280. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  281. swi r11, r1, PT_R1; /* Store user SP. */
  282. clear_ums;
/* NOTE(review): nothing in this routine branches to this "2:" label —
 * it appears vestigial (mirrors the 2: inside SAVE_STATE); confirm. */
  283. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  284. /* Save away the syscall number. */
  285. swi r12, r1, PT_R0;
  286. tovirt(r1,r1)
  287. /* where the trap should return need -8 to adjust for rtsd r15, 8*/
  288. /* Jump to the appropriate function for the system call number in r12
  289. * (r12 is not preserved), or return an error if r12 is not valid. The LP
  290. * register should point to the location where
  291. * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
  292. /* Step into virtual mode */
  293. rtbd r0, 3f
  294. nop
  295. 3:
/* Syscall-entry tracing: if any _TIF_WORK_SYSCALL_MASK flag is set, call
 * do_syscall_trace_enter(&regs->r0); it returns the (possibly rewritten)
 * syscall number in r3, and the argument registers are reloaded from the
 * frame because the tracer may have modified them. */
  296. lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
  297. lwi r11, r11, TI_FLAGS /* get flags in thread info */
  298. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  299. beqi r11, 4f
/* Default the saved return value to -ENOSYS in case the tracer skips. */
  300. addik r3, r0, -ENOSYS
  301. swi r3, r1, PT_R3
  302. brlid r15, do_syscall_trace_enter
  303. addik r5, r1, PT_R0
  304. # do_syscall_trace_enter returns the new syscall nr.
  305. addk r12, r0, r3
  306. lwi r5, r1, PT_R5;
  307. lwi r6, r1, PT_R6;
  308. lwi r7, r1, PT_R7;
  309. lwi r8, r1, PT_R8;
  310. lwi r9, r1, PT_R9;
  311. lwi r10, r1, PT_R10;
  312. 4:
  313. /* Jump to the appropriate function for the system call number in r12
  314. * (r12 is not preserved), or return an error if r12 is not valid.
  315. * The LP register should point to the location where the called function
  316. * should return. [note that MAKE_SYS_CALL uses label 1] */
  317. /* See if the system call number is valid */
/* Range check: 0 <= r12 < __NR_syscalls, else -ENOSYS. */
  318. blti r12, 5f
  319. addi r11, r12, -__NR_syscalls;
  320. bgei r11, 5f;
  321. /* Figure out which function to use for this system call. */
  322. /* Note Microblaze barrel shift is optional, so don't rely on it */
/* r12 *= 4 via two adds (table of 32-bit pointers; no barrel shifter). */
  323. add r12, r12, r12; /* convert num -> ptr */
  324. add r12, r12, r12;
/* r30 is the "restarts allowed" flag checked on the signal path. */
  325. addi r30, r0, 1 /* restarts allowed */
  326. #ifdef DEBUG
  327. /* Trace syscalls and store them to syscall_debug_table */
  328. /* The first syscall location stores total syscall number */
  329. lwi r3, r0, syscall_debug_table
  330. addi r3, r3, 1
  331. swi r3, r0, syscall_debug_table
  332. lwi r3, r12, syscall_debug_table
  333. addi r3, r3, 1
  334. swi r3, r12, syscall_debug_table
  335. #endif
  336. # Find and jump into the syscall handler.
  337. lwi r12, r12, sys_call_table
  338. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  339. addi r15, r0, ret_from_trap-8
  340. bra r12
  341. /* The syscall number is invalid, return an error. */
  342. 5:
  343. braid ret_from_trap
/* -ENOSYS placed in r3 from the braid delay slot. */
  344. addi r3, r0, -ENOSYS;
  345. /* Entry point used to return from a syscall/trap */
  346. /* We re-enable BIP bit before state restore */
/* Common syscall/trap exit: store the return value, then (user mode only)
 * run the exit-work loop — syscall-exit tracing, reschedule, signal
 * delivery — before restoring state and returning with rtbd.
 * Register roles on this path: r19 = cached TI_FLAGS, r30 = restart-
 * allowed flag passed to do_notify_resume as in_syscall. */
  347. C_ENTRY(ret_from_trap):
  348. swi r3, r1, PT_R3
  349. swi r4, r1, PT_R4
  350. lwi r11, r1, PT_MODE;
  351. /* See if returning to kernel mode, if so, skip resched &c. */
  352. bnei r11, 2f;
  353. /* We're returning to user mode, so check for various conditions that
  354. * trigger rescheduling. */
  355. /* FIXME: Restructure all these flag checks. */
  356. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  357. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  358. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  359. beqi r11, 1f
/* Syscall-exit tracing hook, arg = &regs->r0. */
  360. brlid r15, do_syscall_trace_leave
  361. addik r5, r1, PT_R0
  362. 1:
  363. /* We're returning to user mode, so check for various conditions that
  364. * trigger rescheduling. */
  365. /* get thread info from current task */
  366. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  367. lwi r19, r11, TI_FLAGS; /* get flags in thread info */
  368. andi r11, r19, _TIF_NEED_RESCHED;
  369. beqi r11, 5f;
  370. bralid r15, schedule; /* Call scheduler */
  371. nop; /* delay slot */
/* Loop: flags may have changed across schedule(); re-check everything. */
  372. bri 1b
  373. /* Maybe handle a signal */
  374. 5:
  375. andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
  376. beqi r11, 4f; /* Signals to handle, handle them */
  377. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  378. bralid r15, do_notify_resume; /* Handle any signals */
  379. add r6, r30, r0; /* Arg 2: int in_syscall */
  380. add r30, r0, r0 /* no more restarts */
  381. bri 1b
  382. /* Finally, return to user state. */
  383. 4: set_bip; /* Ints masked for state restore */
  384. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  385. VM_OFF;
  386. tophys(r1,r1);
  387. RESTORE_REGS_RTBD;
  388. addik r1, r1, PT_SIZE /* Clean up stack space. */
  389. lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
  390. bri 6f;
  391. /* Return to kernel state. */
  392. 2: set_bip; /* Ints masked for state restore */
  393. VM_OFF;
  394. tophys(r1,r1);
  395. RESTORE_REGS_RTBD;
  396. addik r1, r1, PT_SIZE /* Clean up stack space. */
  397. tovirt(r1,r1);
  398. 6:
  399. TRAP_return: /* Make global symbol for debugging */
  400. rtbd r14, 0; /* Instructions to return from an IRQ */
  401. nop;
  402. /* This the initial entry point for a new child thread, with an appropriate
  403. stack in place that makes it look as if the child is in the middle of a
  404. syscall. This function is actually `returned to' from switch_thread
  405. (copy_thread makes ret_from_fork the return address in each new thread's
  406. saved context). */
  407. C_ENTRY(ret_from_fork):
  408. bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
  409. add r5, r3, r0; /* switch_thread returns the prev task */
  410. /* ( in the delay slot ) */
  411. brid ret_from_trap; /* Do normal trap return */
  412. add r3, r0, r0; /* Child's fork call should return 0. */
/* Like ret_from_fork, but for kernel threads: after schedule_tail, call
 * the thread function (left in r20 by copy_thread) with its argument
 * (r19), then take the normal trap-return path. */
  413. C_ENTRY(ret_from_kernel_thread):
  414. bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
  415. add r5, r3, r0; /* switch_thread returns the prev task */
  416. /* ( in the delay slot ) */
  417. brald r15, r20 /* fn was left in r20 */
  418. addk r5, r0, r19 /* ... and argument - in r19 */
  419. brid ret_from_trap
  420. add r3, r0, r0
/* sigreturn must never be restarted: clear the r30 restart flag before
 * tail-calling the real sys_rt_sigreturn(regs). */
  421. C_ENTRY(sys_rt_sigreturn_wrapper):
  422. addik r30, r0, 0 /* no restarts */
  423. brid sys_rt_sigreturn /* Do real work */
  424. addik r5, r1, 0; /* add user context as 1st arg */
  425. /*
  426. * HW EXCEPTION routine start
  427. */
/* Hardware exception: save full state, record the faulting PC (r17),
 * switch to virtual mode and call full_exception(regs, esr, fsr) with
 * the return path aimed at ret_from_exc. */
  428. C_ENTRY(full_exception_trap):
  429. /* adjust exception address for privileged instruction
  430. * for finding where is it */
  431. addik r17, r17, -4
  432. SAVE_STATE /* Save registers */
  433. /* PC, before IRQ/trap - this is one instruction above */
  434. swi r17, r1, PT_PC;
  435. tovirt(r1,r1)
  436. /* FIXME this can be store directly in PT_ESR reg.
  437. * I tested it but there is a fault */
  438. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  439. addik r15, r0, ret_from_exc - 8
  440. mfs r6, resr
  441. mfs r7, rfsr; /* save FSR */
  442. mts rfsr, r0; /* Clear sticky fsr */
/* rted enters the handler and re-enables exceptions; delay slot loads
 * arg1 = regs. */
  443. rted r0, full_exception
  444. addik r5, r1, 0 /* parameter struct pt_regs * regs */
  445. /*
  446. * Unaligned data trap.
  447. *
  448. * Unaligned data trap last on 4k page is handled here.
  449. *
  450. * Trap entered via exception, so EE bit is set, and interrupts
  451. * are masked. This is nice, means we don't have to CLI before state save
  452. *
  453. * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
  454. */
  455. C_ENTRY(unaligned_data_trap):
  456. /* MS: I have to save r11 value and then restore it because
  457. * set_bip, clear_eip, set_ee use r11 as temp register if MSR
  458. * instructions are not used. We don't need to do if MSR instructions
  459. * are used and they use r0 instead of r11.
  460. * I am using ENTRY_SP which should be primary used only for stack
  461. * pointer saving. */
  462. swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  463. set_bip; /* equalize initial state for all possible entries */
  464. clear_eip;
  465. set_ee;
  466. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  467. SAVE_STATE /* Save registers.*/
  468. /* PC, before IRQ/trap - this is one instruction above */
  469. swi r17, r1, PT_PC;
  470. tovirt(r1,r1)
  471. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  472. addik r15, r0, ret_from_exc-8
/* _unaligned_data_exception(?, esr, ear, regs) — args: r3=ESR, r4=EAR,
 * r7=regs; regs is loaded in the rtbd delay slot. */
  473. mfs r3, resr /* ESR */
  474. mfs r4, rear /* EAR */
  475. rtbd r0, _unaligned_data_exception
  476. addik r7, r1, 0 /* parameter struct pt_regs * regs */
  477. /*
  478. * Page fault traps.
  479. *
  480. * If the real exception handler (from hw_exception_handler.S) didn't find
  481. * the mapping for the process, then we're thrown here to handle such situation.
  482. *
  483. * Trap entered via exceptions, so EE bit is set, and interrupts
  484. * are masked. This is nice, means we don't have to CLI before state save
  485. *
  486. * Build a standard exception frame for TLB Access errors. All TLB exceptions
  487. * will bail out to this point if they can't resolve the lightweight TLB fault.
  488. *
  489. * The C function called is in "arch/microblaze/mm/fault.c", declared as:
  490. * void do_page_fault(struct pt_regs *regs,
  491. * unsigned long address,
  492. * unsigned long error_code)
  493. */
  494. /* data and instruction trap - which one was taken is resolved in fault.c */
  495. C_ENTRY(page_fault_data_trap):
  496. SAVE_STATE /* Save registers.*/
  497. /* PC, before IRQ/trap - this is one instruction above */
  498. swi r17, r1, PT_PC;
  499. tovirt(r1,r1)
  500. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  501. addik r15, r0, ret_from_exc-8
/* Data fault: pass faulting address (EAR) and error code (ESR). */
  502. mfs r6, rear /* parameter unsigned long address */
  503. mfs r7, resr /* parameter unsigned long error_code */
  504. rted r0, do_page_fault
  505. addik r5, r1, 0 /* parameter struct pt_regs * regs */
/* Instruction fault: same shape, but error_code is fixed at 0. */
  506. C_ENTRY(page_fault_instr_trap):
  507. SAVE_STATE /* Save registers.*/
  508. /* PC, before IRQ/trap - this is one instruction above */
  509. swi r17, r1, PT_PC;
  510. tovirt(r1,r1)
  511. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  512. addik r15, r0, ret_from_exc-8
  513. mfs r6, rear /* parameter unsigned long address */
  514. ori r7, r0, 0 /* parameter unsigned long error_code */
  515. rted r0, do_page_fault
  516. addik r5, r1, 0 /* parameter struct pt_regs * regs */
  517. /* Entry point used to return from an exception. */
/* Exception exit: mirrors ret_from_trap minus the syscall-tracing step.
 * If returning to user mode, loop over reschedule + signal delivery
 * (r19 caches TI_FLAGS); in_syscall is 0 here since this is not a
 * syscall path.  Finishes with RESTORE_REGS_RTBD and rtbd via r14. */
  518. C_ENTRY(ret_from_exc):
  519. lwi r11, r1, PT_MODE;
  520. bnei r11, 2f; /* See if returning to kernel mode, */
  521. /* ... if so, skip resched &c. */
  522. /* We're returning to user mode, so check for various conditions that
  523. trigger rescheduling. */
  524. 1:
  525. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  526. lwi r19, r11, TI_FLAGS; /* get flags in thread info */
  527. andi r11, r19, _TIF_NEED_RESCHED;
  528. beqi r11, 5f;
  529. /* Call the scheduler before returning from a syscall/trap. */
  530. bralid r15, schedule; /* Call scheduler */
  531. nop; /* delay slot */
  532. bri 1b
  533. /* Maybe handle a signal */
  534. 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
  535. beqi r11, 4f; /* Signals to handle, handle them */
  536. /*
  537. * Handle a signal return; Pending signals should be in r18.
  538. *
  539. * Not all registers are saved by the normal trap/interrupt entry
  540. * points (for instance, call-saved registers (because the normal
  541. * C-compiler calling sequence in the kernel makes sure they're
  542. * preserved), and call-clobbered registers in the case of
  543. * traps), but signal handlers may want to examine or change the
  544. * complete register state. Here we save anything not saved by
  545. * the normal entry sequence, so that it may be safely restored
  546. * (in a possibly modified form) after do_notify_resume returns. */
  547. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  548. bralid r15, do_notify_resume; /* Handle any signals */
  549. addi r6, r0, 0; /* Arg 2: int in_syscall */
  550. bri 1b
  551. /* Finally, return to user state. */
  552. 4: set_bip; /* Ints masked for state restore */
  553. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  554. VM_OFF;
  555. tophys(r1,r1);
  556. RESTORE_REGS_RTBD;
  557. addik r1, r1, PT_SIZE /* Clean up stack space. */
  558. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
  559. bri 6f;
  560. /* Return to kernel state. */
  561. 2: set_bip; /* Ints masked for state restore */
  562. VM_OFF;
  563. tophys(r1,r1);
  564. RESTORE_REGS_RTBD;
  565. addik r1, r1, PT_SIZE /* Clean up stack space. */
  566. tovirt(r1,r1);
  567. 6:
  568. EXC_return: /* Make global symbol for debugging */
  569. rtbd r14, 0; /* Instructions to return from an IRQ */
  570. nop;
  571. /*
  572. * HW EXCEPTION routine end
  573. */
  574. /*
  575. * Hardware maskable interrupts.
  576. *
  577. * The stack-pointer (r1) should have already been saved to the memory
  578. * location PER_CPU(ENTRY_SP).
  579. */
/* IRQ entry: decide kernel/user origin via MSR_UMS, build pt_regs on
 * the appropriate kernel stack (physical addressing), call do_IRQ(regs),
 * then on return run the user-mode exit-work loop or (optionally) the
 * kernel-preemption check, and return with rtid via r14. */
  580. C_ENTRY(_interrupt):
  581. /* MS: we are in physical address */
  582. /* Save registers, switch to proper stack, convert SP to virtual.*/
  583. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  584. /* MS: See if already in kernel mode. */
  585. mfs r1, rmsr
  586. nop
  587. andi r1, r1, MSR_UMS
  588. bnei r1, 1f
  589. /* Kernel-mode state save. */
  590. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  591. tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
  592. /* save registers */
  593. /* MS: Make room on the stack -> activation record */
  594. addik r1, r1, -PT_SIZE;
  595. SAVE_REGS
  596. brid 2f;
/* PT_MODE written with r1 (non-zero) in the brid delay slot = kernel. */
  597. swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
  598. 1:
  599. /* User-mode state save. */
  600. /* MS: get the saved current */
  601. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  602. tophys(r1,r1);
  603. lwi r1, r1, TS_THREAD_INFO;
  604. addik r1, r1, THREAD_SIZE;
  605. tophys(r1,r1);
  606. /* save registers */
  607. addik r1, r1, -PT_SIZE;
  608. SAVE_REGS
  609. /* calculate mode */
  610. swi r0, r1, PT_MODE;
  611. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  612. swi r11, r1, PT_R1;
  613. clear_ums;
  614. 2:
  615. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  616. tovirt(r1,r1)
/* r15 = irq_call so do_IRQ "returns" to the instruction after rtbd
 * (i.e. falls through into ret_from_irq after the rtsd r15, 8 adjust). */
  617. addik r15, r0, irq_call;
  618. irq_call:rtbd r0, do_IRQ;
  619. addik r5, r1, 0;
  620. /* MS: we are in virtual mode */
  621. ret_from_irq:
  622. lwi r11, r1, PT_MODE;
  623. bnei r11, 2f;
  624. 1:
  625. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  626. lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */
  627. andi r11, r19, _TIF_NEED_RESCHED;
  628. beqi r11, 5f
  629. bralid r15, schedule;
  630. nop; /* delay slot */
  631. bri 1b
  632. /* Maybe handle a signal */
  633. 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
  634. beqid r11, no_intr_resched
  635. /* Handle a signal return; Pending signals should be in r18. */
  636. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  637. bralid r15, do_notify_resume; /* Handle any signals */
  638. addi r6, r0, 0; /* Arg 2: int in_syscall */
  639. bri 1b
  640. /* Finally, return to user state. */
  641. no_intr_resched:
  642. /* Disable interrupts, we are now committed to the state restore */
  643. disable_irq
  644. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
  645. VM_OFF;
  646. tophys(r1,r1);
  647. RESTORE_REGS
  648. addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
  649. lwi r1, r1, PT_R1 - PT_SIZE;
  650. bri 6f;
  651. /* MS: Return to kernel state. */
  652. 2:
  653. #ifdef CONFIG_PREEMPT
/* Kernel preemption: only if preempt_count == 0 and NEED_RESCHED set. */
  654. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  655. /* MS: get preempt_count from thread info */
  656. lwi r5, r11, TI_PREEMPT_COUNT;
  657. bgti r5, restore;
  658. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  659. andi r5, r5, _TIF_NEED_RESCHED;
  660. beqi r5, restore /* if zero jump over */
  661. preempt:
  662. /* interrupts are off that's why I am calling preempt_schedule_irq */
  663. bralid r15, preempt_schedule_irq
  664. nop
  665. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  666. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  667. andi r5, r5, _TIF_NEED_RESCHED;
  668. bnei r5, preempt /* if non zero jump to resched */
  669. restore:
  670. #endif
  671. VM_OFF /* MS: turn off MMU */
  672. tophys(r1,r1)
  673. RESTORE_REGS
  674. addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
  675. tovirt(r1,r1);
  676. 6:
  677. IRQ_return: /* MS: Make global symbol for debugging */
  678. rtid r14, 0
  679. nop
  680. /*
  681. * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
  682. * and call handling function with saved pt_regs
  683. */
/* Debug trap: r16 holds the PC of the brki (saved as PT_PC).  Kernel-mode
 * entry services KGDB (microblaze_kgdb_break); user-mode entry calls
 * sw_exception for gdb support, then runs the usual user exit-work loop.
 * Both return via rtbd through r16. */
  684. C_ENTRY(_debug_exception):
  685. /* BIP bit is set on entry, no interrupts can occur */
  686. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  687. mfs r1, rmsr
  688. nop
  689. andi r1, r1, MSR_UMS
  690. bnei r1, 1f
  691. /* MS: Kernel-mode state save - kgdb */
  692. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
  693. /* BIP bit is set on entry, no interrupts can occur */
/* tophys + frame allocation folded into one addik (virt->phys offset). */
  694. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
  695. SAVE_REGS;
  696. /* save all regs to pt_reg structure */
  697. swi r0, r1, PT_R0; /* R0 must be saved too */
  698. swi r14, r1, PT_R14 /* rewrite saved R14 value */
  699. swi r16, r1, PT_PC; /* PC and r16 are the same */
  700. /* save special purpose registers to pt_regs */
  701. mfs r11, rear;
  702. swi r11, r1, PT_EAR;
  703. mfs r11, resr;
  704. swi r11, r1, PT_ESR;
  705. mfs r11, rfsr;
  706. swi r11, r1, PT_FSR;
  707. /* stack pointer is in physical address at it is decrease
  708. * by PT_SIZE but we need to get correct R1 value */
/* Reconstruct the pre-trap virtual SP for the saved frame. */
  709. addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
  710. swi r11, r1, PT_R1
  711. /* MS: r31 - current pointer isn't changed */
  712. tovirt(r1,r1)
  713. #ifdef CONFIG_KGDB
  714. addi r5, r1, 0 /* pass pt_reg address as the first arg */
  715. addik r15, r0, dbtrap_call; /* return address */
  716. rtbd r0, microblaze_kgdb_break
  717. nop;
  718. #endif
  719. /* MS: Place handler for brki from kernel space if KGDB is OFF.
  720. * It is very unlikely that another brki instruction is called. */
/* Dead-stop if KGDB is compiled out: spin at address 0. */
  721. bri 0
  722. /* MS: User-mode state save - gdb */
  723. 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  724. tophys(r1,r1);
  725. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
  726. addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
  727. tophys(r1,r1);
  728. addik r1, r1, -PT_SIZE; /* Make room on the stack. */
  729. SAVE_REGS;
  730. swi r16, r1, PT_PC; /* Save LP */
  731. swi r0, r1, PT_MODE; /* Was in user-mode. */
  732. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  733. swi r11, r1, PT_R1; /* Store user SP. */
  734. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  735. tovirt(r1,r1)
  736. set_vms;
  737. addik r5, r1, 0;
  738. addik r15, r0, dbtrap_call;
  739. dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
  740. rtbd r0, sw_exception
  741. nop
  742. /* MS: The first instruction for the second part of the gdb/kgdb */
  743. set_bip; /* Ints masked for state restore */
  744. lwi r11, r1, PT_MODE;
  745. bnei r11, 2f;
  746. /* MS: Return to user space - gdb */
  747. 1:
  748. /* Get current task ptr into r11 */
  749. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  750. lwi r19, r11, TI_FLAGS; /* get flags in thread info */
  751. andi r11, r19, _TIF_NEED_RESCHED;
  752. beqi r11, 5f;
  753. /* Call the scheduler before returning from a syscall/trap. */
  754. bralid r15, schedule; /* Call scheduler */
  755. nop; /* delay slot */
  756. bri 1b
  757. /* Maybe handle a signal */
  758. 5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
  759. beqi r11, 4f; /* Signals to handle, handle them */
  760. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  761. bralid r15, do_notify_resume; /* Handle any signals */
  762. addi r6, r0, 0; /* Arg 2: int in_syscall */
  763. bri 1b
  764. /* Finally, return to user state. */
  765. 4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  766. VM_OFF;
  767. tophys(r1,r1);
  768. /* MS: Restore all regs */
  769. RESTORE_REGS_RTBD
  770. addik r1, r1, PT_SIZE /* Clean up stack space */
  771. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
  772. DBTRAP_return_user: /* MS: Make global symbol for debugging */
  773. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  774. nop;
  775. /* MS: Return to kernel state - kgdb */
  776. 2: VM_OFF;
  777. tophys(r1,r1);
  778. /* MS: Restore all regs */
  779. RESTORE_REGS_RTBD
/* KGDB may have rewritten PT_R14/PT_PC; reload them after the bulk
 * restore so the rtbd target reflects any gdb-set PC. */
  780. lwi r14, r1, PT_R14;
  781. lwi r16, r1, PT_PC;
  782. addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
  783. tovirt(r1,r1);
  784. DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
  785. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  786. nop;
/* ------------------------------------------------------------------
 * struct task_struct *_switch_to(struct thread_info *prev,
 *                                struct thread_info *next)
 *
 * Context switch. Saves the outgoing task's stack pointer, dedicated
 * and non-volatile registers plus MSR/EAR/ESR/FSR into
 * prev->cpu_context, then loads the incoming task's state from
 * next->cpu_context and returns into it.
 *
 * In:  r5 = thread_info of the outgoing task
 *      r6 = thread_info of the incoming task
 * Out: r3 = task pointer of the outgoing task (CURRENT_TASK is
 *           sampled here, before it is redirected to the new task)
 * Volatile registers (r3-r12) are deliberately not saved here; per
 * the comment below they were saved on the stack by the caller.
 * ------------------------------------------------------------------ */
787. ENTRY(_switch_to)
788. /* prepare return value */
789. addk r3, r0, CURRENT_TASK
790. /* save registers in cpu_context */
791. /* use r11 and r12, volatile registers, as temp register */
792. /* give start of cpu_context for previous process */
793. addik r11, r5, TI_CPU_CONTEXT
794. swi r1, r11, CC_R1
795. swi r2, r11, CC_R2
796. /* skip volatile registers.
797. * they are saved on stack when we jumped to _switch_to() */
798. /* dedicated registers */
799. swi r13, r11, CC_R13
800. swi r14, r11, CC_R14
801. swi r15, r11, CC_R15
802. swi r16, r11, CC_R16
803. swi r17, r11, CC_R17
804. swi r18, r11, CC_R18
805. /* save non-volatile registers */
806. swi r19, r11, CC_R19
807. swi r20, r11, CC_R20
808. swi r21, r11, CC_R21
809. swi r22, r11, CC_R22
810. swi r23, r11, CC_R23
811. swi r24, r11, CC_R24
812. swi r25, r11, CC_R25
813. swi r26, r11, CC_R26
814. swi r27, r11, CC_R27
815. swi r28, r11, CC_R28
816. swi r29, r11, CC_R29
817. swi r30, r11, CC_R30
818. /* special purpose registers */
819. mfs r12, rmsr
820. swi r12, r11, CC_MSR
821. mfs r12, rear
822. swi r12, r11, CC_EAR
823. mfs r12, resr
824. swi r12, r11, CC_ESR
825. mfs r12, rfsr
826. swi r12, r11, CC_FSR
/* Note: EAR/ESR are captured above but only FSR and MSR are written
 * back on the restore side below. */
827. /* update r31, the current-give me pointer to task which will be next */
828. lwi CURRENT_TASK, r6, TI_TASK
829. /* stored it to current_save too */
830. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
831. /* get new process' cpu context and restore */
832. /* give me start where start context of next task */
833. addik r11, r6, TI_CPU_CONTEXT
834. /* non-volatile registers */
835. lwi r30, r11, CC_R30
836. lwi r29, r11, CC_R29
837. lwi r28, r11, CC_R28
838. lwi r27, r11, CC_R27
839. lwi r26, r11, CC_R26
840. lwi r25, r11, CC_R25
841. lwi r24, r11, CC_R24
842. lwi r23, r11, CC_R23
843. lwi r22, r11, CC_R22
844. lwi r21, r11, CC_R21
845. lwi r20, r11, CC_R20
846. lwi r19, r11, CC_R19
847. /* dedicated registers */
848. lwi r18, r11, CC_R18
849. lwi r17, r11, CC_R17
850. lwi r16, r11, CC_R16
851. lwi r15, r11, CC_R15
852. lwi r14, r11, CC_R14
853. lwi r13, r11, CC_R13
854. /* skip volatile registers */
855. lwi r2, r11, CC_R2
856. lwi r1, r11, CC_R1
857. /* special purpose registers */
858. lwi r12, r11, CC_FSR
859. mts rfsr, r12
860. lwi r12, r11, CC_MSR
861. mts rmsr, r12
/* Return through the NEW task's saved link register (r15 was just
 * reloaded from next->cpu_context), resuming it inside its own
 * earlier call to _switch_to. */
862. rtsd r15, 8
863. nop
/* Software reset entry: absolute branch to address 0, the CPU's
 * reset vector (see the .init.ivt table below). Never returns. */
864. ENTRY(_reset)
865. brai 0; /* Jump to reset vector */
866. /* These are compiled and loaded into high memory, then
867. * copied into place in mach_early_setup */
868. .section .init.ivt, "ax"
/* Interrupt/exception vector table. Each slot is 8 bytes (note the
 * 0x0/0x8/0x10/0x18/0x20 .org offsets); every entry is an absolute
 * branch to the physical (TOPHYS) address of its handler. */
869. #if CONFIG_MANUAL_RESET_VECTOR
870. .org 0x0
/* Vector 0x00: reset - only emitted when a manual reset address is
 * configured. */
871. brai CONFIG_MANUAL_RESET_VECTOR
872. #endif
873. .org 0x8
874. brai TOPHYS(_user_exception); /* syscall handler */
875. .org 0x10
876. brai TOPHYS(_interrupt); /* Interrupt handler */
877. .org 0x18
878. brai TOPHYS(_debug_exception); /* debug trap handler */
879. .org 0x20
880. brai TOPHYS(_hw_exception_handler); /* HW exception handler */
881. .section .rodata,"a"
/* System-call table: pulls in the sys_call_table entries and records
 * the table's byte size for bounds checking by C code. */
882. #include "syscall_table.S"
883. syscall_table_size=(.-sys_call_table)
/* NUL-terminated type strings referenced by the trap-decoding table
 * below (runtime data - do not alter the text). */
884. type_SYSCALL:
885. .ascii "SYSCALL\0"
886. type_IRQ:
887. .ascii "IRQ\0"
888. type_IRQ_PREEMPT:
889. .ascii "IRQ (PREEMPTED)\0"
890. type_SYSCALL_PREEMPT:
891. .ascii " SYSCALL (PREEMPTED)\0"
892. /*
893. * Trap decoding for stack unwinder
894. * Tuples are (start addr, end addr, string)
895. * If return address lies on [start addr, end addr],
896. * unwinder displays 'string'
897. */
898. .align 4
899. .global microblaze_trap_handlers
900. microblaze_trap_handlers:
/* Each row is three words: range start, range end, string pointer.
 * Rows where start == end (exact matches) must precede the fuzzy
 * ranges so they are found first. A zero row terminates the table. */
901. /* Exact matches come first */
902. .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
903. .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
904. /* Fuzzy matches go here */
905. .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
906. .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
907. /* End of table */
908. .word 0 ; .word 0 ; .word 0