/*
 * linux/arch/nios2/kernel/entry.S
 *
 * Copyright (C) 2013-2014 Altera Corporation
 * Copyright (C) 2009, Wind River Systems Inc
 *
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
 * Kenneth Albanowski <kjahds@kjahds.com>,
 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 */
#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/asm-macros.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/entry.h>
#include <asm/unistd.h>
#include <asm/processor.h>
  36. .macro GET_THREAD_INFO reg
  37. .if THREAD_SIZE & 0xffff0000
  38. andhi \reg, sp, %hi(~(THREAD_SIZE-1))
  39. .else
  40. addi \reg, r0, %lo(~(THREAD_SIZE-1))
  41. and \reg, \reg, sp
  42. .endif
  43. .endm
  44. .macro kuser_cmpxchg_check
  45. /*
  46. * Make sure our user space atomic helper is restarted if it was
  47. * interrupted in a critical region.
  48. * ea-4 = address of interrupted insn (ea must be preserved).
  49. * sp = saved regs.
  50. * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
  51. * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
  52. * cmpxchg_ldw + 4.
  53. */
  54. /* et = cmpxchg_stw + 4 */
  55. movui et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
  56. bgtu ea, et, 1f
  57. subi et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
  58. bltu ea, et, 1f
  59. stw et, PT_EA(sp) /* fix up EA */
  60. mov ea, et
  61. 1:
  62. .endm
  63. .section .rodata
  64. .align 4
  65. exception_table:
  66. .word unhandled_exception /* 0 - Reset */
  67. .word unhandled_exception /* 1 - Processor-only Reset */
  68. .word external_interrupt /* 2 - Interrupt */
  69. .word handle_trap /* 3 - Trap Instruction */
  70. .word instruction_trap /* 4 - Unimplemented instruction */
  71. .word handle_illegal /* 5 - Illegal instruction */
  72. .word handle_unaligned /* 6 - Misaligned data access */
  73. .word handle_unaligned /* 7 - Misaligned destination address */
  74. .word handle_diverror /* 8 - Division error */
  75. .word protection_exception_ba /* 9 - Supervisor-only instr. address */
  76. .word protection_exception_instr /* 10 - Supervisor only instruction */
  77. .word protection_exception_ba /* 11 - Supervisor only data address */
  78. .word unhandled_exception /* 12 - Double TLB miss (data) */
  79. .word protection_exception_pte /* 13 - TLB permission violation (x) */
  80. .word protection_exception_pte /* 14 - TLB permission violation (r) */
  81. .word protection_exception_pte /* 15 - TLB permission violation (w) */
  82. .word unhandled_exception /* 16 - MPU region violation */
  83. trap_table:
  84. .word handle_system_call /* 0 */
  85. .word instruction_trap /* 1 */
  86. .word instruction_trap /* 2 */
  87. .word instruction_trap /* 3 */
  88. .word instruction_trap /* 4 */
  89. .word instruction_trap /* 5 */
  90. .word instruction_trap /* 6 */
  91. .word instruction_trap /* 7 */
  92. .word instruction_trap /* 8 */
  93. .word instruction_trap /* 9 */
  94. .word instruction_trap /* 10 */
  95. .word instruction_trap /* 11 */
  96. .word instruction_trap /* 12 */
  97. .word instruction_trap /* 13 */
  98. .word instruction_trap /* 14 */
  99. .word instruction_trap /* 15 */
  100. .word instruction_trap /* 16 */
  101. .word instruction_trap /* 17 */
  102. .word instruction_trap /* 18 */
  103. .word instruction_trap /* 19 */
  104. .word instruction_trap /* 20 */
  105. .word instruction_trap /* 21 */
  106. .word instruction_trap /* 22 */
  107. .word instruction_trap /* 23 */
  108. .word instruction_trap /* 24 */
  109. .word instruction_trap /* 25 */
  110. .word instruction_trap /* 26 */
  111. .word instruction_trap /* 27 */
  112. .word instruction_trap /* 28 */
  113. .word instruction_trap /* 29 */
  114. .word instruction_trap /* 30 */
  115. .word handle_breakpoint /* 31 */
  116. .text
  117. .set noat
  118. .set nobreak
  119. ENTRY(inthandler)
  120. SAVE_ALL
  121. kuser_cmpxchg_check
  122. /* Clear EH bit before we get a new excpetion in the kernel
  123. * and after we have saved it to the exception frame. This is done
  124. * whether it's trap, tlb-miss or interrupt. If we don't do this
  125. * estatus is not updated the next exception.
  126. */
  127. rdctl r24, status
  128. movi r9, %lo(~STATUS_EH)
  129. and r24, r24, r9
  130. wrctl status, r24
  131. /* Read cause and vector and branch to the associated handler */
  132. mov r4, sp
  133. rdctl r5, exception
  134. movia r9, exception_table
  135. add r24, r9, r5
  136. ldw r24, 0(r24)
  137. jmp r24
  138. /***********************************************************************
  139. * Handle traps
  140. ***********************************************************************
  141. */
  142. ENTRY(handle_trap)
  143. ldw r24, -4(ea) /* instruction that caused the exception */
  144. srli r24, r24, 4
  145. andi r24, r24, 0x7c
  146. movia r9,trap_table
  147. add r24, r24, r9
  148. ldw r24, 0(r24)
  149. jmp r24
  150. /***********************************************************************
  151. * Handle system calls
  152. ***********************************************************************
  153. */
  154. ENTRY(handle_system_call)
  155. /* Enable interrupts */
  156. rdctl r10, status
  157. ori r10, r10, STATUS_PIE
  158. wrctl status, r10
  159. /* Reload registers destroyed by common code. */
  160. ldw r4, PT_R4(sp)
  161. ldw r5, PT_R5(sp)
  162. local_restart:
  163. /* Check that the requested system call is within limits */
  164. movui r1, __NR_syscalls
  165. bgeu r2, r1, ret_invsyscall
  166. slli r1, r2, 2
  167. movhi r11, %hiadj(sys_call_table)
  168. add r1, r1, r11
  169. ldw r1, %lo(sys_call_table)(r1)
  170. beq r1, r0, ret_invsyscall
  171. /* Check if we are being traced */
  172. GET_THREAD_INFO r11
  173. ldw r11,TI_FLAGS(r11)
  174. BTBNZ r11,r11,TIF_SYSCALL_TRACE,traced_system_call
  175. /* Execute the system call */
  176. callr r1
  177. /* If the syscall returns a negative result:
  178. * Set r7 to 1 to indicate error,
  179. * Negate r2 to get a positive error code
  180. * If the syscall returns zero or a positive value:
  181. * Set r7 to 0.
  182. * The sigreturn system calls will skip the code below by
  183. * adding to register ra. To avoid destroying registers
  184. */
  185. translate_rc_and_ret:
  186. movi r1, 0
  187. bge r2, zero, 3f
  188. sub r2, zero, r2
  189. movi r1, 1
  190. 3:
  191. stw r2, PT_R2(sp)
  192. stw r1, PT_R7(sp)
  193. end_translate_rc_and_ret:
  194. ret_from_exception:
  195. ldw r1, PT_ESTATUS(sp)
  196. /* if so, skip resched, signals */
  197. TSTBNZ r1, r1, ESTATUS_EU, Luser_return
  198. restore_all:
  199. rdctl r10, status /* disable intrs */
  200. andi r10, r10, %lo(~STATUS_PIE)
  201. wrctl status, r10
  202. RESTORE_ALL
  203. eret
  204. /* If the syscall number was invalid return ENOSYS */
  205. ret_invsyscall:
  206. movi r2, -ENOSYS
  207. br translate_rc_and_ret
  208. /* This implements the same as above, except it calls
  209. * do_syscall_trace_enter and do_syscall_trace_exit before and after the
  210. * syscall in order for utilities like strace and gdb to work.
  211. */
  212. traced_system_call:
  213. SAVE_SWITCH_STACK
  214. call do_syscall_trace_enter
  215. RESTORE_SWITCH_STACK
  216. /* Create system call register arguments. The 5th and 6th
  217. arguments on stack are already in place at the beginning
  218. of pt_regs. */
  219. ldw r2, PT_R2(sp)
  220. ldw r4, PT_R4(sp)
  221. ldw r5, PT_R5(sp)
  222. ldw r6, PT_R6(sp)
  223. ldw r7, PT_R7(sp)
  224. /* Fetch the syscall function, we don't need to check the boundaries
  225. * since this is already done.
  226. */
  227. slli r1, r2, 2
  228. movhi r11,%hiadj(sys_call_table)
  229. add r1, r1, r11
  230. ldw r1, %lo(sys_call_table)(r1)
  231. callr r1
  232. /* If the syscall returns a negative result:
  233. * Set r7 to 1 to indicate error,
  234. * Negate r2 to get a positive error code
  235. * If the syscall returns zero or a positive value:
  236. * Set r7 to 0.
  237. * The sigreturn system calls will skip the code below by
  238. * adding to register ra. To avoid destroying registers
  239. */
  240. translate_rc_and_ret2:
  241. movi r1, 0
  242. bge r2, zero, 4f
  243. sub r2, zero, r2
  244. movi r1, 1
  245. 4:
  246. stw r2, PT_R2(sp)
  247. stw r1, PT_R7(sp)
  248. end_translate_rc_and_ret2:
  249. SAVE_SWITCH_STACK
  250. call do_syscall_trace_exit
  251. RESTORE_SWITCH_STACK
  252. br ret_from_exception
  253. Luser_return:
  254. GET_THREAD_INFO r11 /* get thread_info pointer */
  255. ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
  256. ANDI32 r11, r10, _TIF_WORK_MASK
  257. beq r11, r0, restore_all /* Nothing to do */
  258. BTBZ r1, r10, TIF_NEED_RESCHED, Lsignal_return
  259. /* Reschedule work */
  260. call schedule
  261. br ret_from_exception
  262. Lsignal_return:
  263. ANDI32 r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
  264. beq r1, r0, restore_all
  265. mov r4, sp /* pt_regs */
  266. SAVE_SWITCH_STACK
  267. call do_notify_resume
  268. beq r2, r0, no_work_pending
  269. RESTORE_SWITCH_STACK
  270. /* prepare restart syscall here without leaving kernel */
  271. ldw r2, PT_R2(sp) /* reload syscall number in r2 */
  272. ldw r4, PT_R4(sp) /* reload syscall arguments r4-r9 */
  273. ldw r5, PT_R5(sp)
  274. ldw r6, PT_R6(sp)
  275. ldw r7, PT_R7(sp)
  276. ldw r8, PT_R8(sp)
  277. ldw r9, PT_R9(sp)
  278. br local_restart /* restart syscall */
  279. no_work_pending:
  280. RESTORE_SWITCH_STACK
  281. br ret_from_exception
  282. /***********************************************************************
  283. * Handle external interrupts.
  284. ***********************************************************************
  285. */
  286. /*
  287. * This is the generic interrupt handler (for all hardware interrupt
  288. * sources). It figures out the vector number and calls the appropriate
  289. * interrupt service routine directly.
  290. */
  291. external_interrupt:
  292. rdctl r12, ipending
  293. rdctl r9, ienable
  294. and r12, r12, r9
  295. /* skip if no interrupt is pending */
  296. beq r12, r0, ret_from_interrupt
  297. movi r24, -1
  298. stw r24, PT_ORIG_R2(sp)
  299. /*
  300. * Process an external hardware interrupt.
  301. */
  302. addi ea, ea, -4 /* re-issue the interrupted instruction */
  303. stw ea, PT_EA(sp)
  304. 2: movi r4, %lo(-1) /* Start from bit position 0,
  305. highest priority */
  306. /* This is the IRQ # for handler call */
  307. 1: andi r10, r12, 1 /* Isolate bit we are interested in */
  308. srli r12, r12, 1 /* shift count is costly without hardware
  309. multiplier */
  310. addi r4, r4, 1
  311. beq r10, r0, 1b
  312. mov r5, sp /* Setup pt_regs pointer for handler call */
  313. call do_IRQ
  314. rdctl r12, ipending /* check again if irq still pending */
  315. rdctl r9, ienable /* Isolate possible interrupts */
  316. and r12, r12, r9
  317. bne r12, r0, 2b
  318. /* br ret_from_interrupt */ /* fall through to ret_from_interrupt */
  319. ENTRY(ret_from_interrupt)
  320. ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
  321. TSTBNZ r1, r1, ESTATUS_EU, Luser_return
  322. #ifdef CONFIG_PREEMPT
  323. GET_THREAD_INFO r1
  324. ldw r4, TI_PREEMPT_COUNT(r1)
  325. bne r4, r0, restore_all
  326. need_resched:
  327. ldw r4, TI_FLAGS(r1) /* ? Need resched set */
  328. BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
  329. ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
  330. andi r10, r4, ESTATUS_EPIE
  331. beq r10, r0, restore_all
  332. movia r4, PREEMPT_ACTIVE
  333. stw r4, TI_PREEMPT_COUNT(r1)
  334. rdctl r10, status /* enable intrs again */
  335. ori r10, r10 ,STATUS_PIE
  336. wrctl status, r10
  337. PUSH r1
  338. call schedule
  339. POP r1
  340. mov r4, r0
  341. stw r4, TI_PREEMPT_COUNT(r1)
  342. rdctl r10, status /* disable intrs */
  343. andi r10, r10, %lo(~STATUS_PIE)
  344. wrctl status, r10
  345. br need_resched
  346. #else
  347. br restore_all
  348. #endif
  349. /***********************************************************************
  350. * A few syscall wrappers
  351. ***********************************************************************
  352. */
  353. /*
  354. * int clone(unsigned long clone_flags, unsigned long newsp,
  355. * int __user * parent_tidptr, int __user * child_tidptr,
  356. * int tls_val)
  357. */
  358. ENTRY(sys_clone)
  359. SAVE_SWITCH_STACK
  360. addi sp, sp, -4
  361. stw r7, 0(sp) /* Pass 5th arg thru stack */
  362. mov r7, r6 /* 4th arg is 3rd of clone() */
  363. mov r6, zero /* 3rd arg always 0 */
  364. call do_fork
  365. addi sp, sp, 4
  366. RESTORE_SWITCH_STACK
  367. ret
  368. ENTRY(sys_rt_sigreturn)
  369. SAVE_SWITCH_STACK
  370. mov r4, sp
  371. call do_rt_sigreturn
  372. RESTORE_SWITCH_STACK
  373. addi ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
  374. ret
  375. /***********************************************************************
  376. * A few other wrappers and stubs
  377. ***********************************************************************
  378. */
  379. protection_exception_pte:
  380. rdctl r6, pteaddr
  381. slli r6, r6, 10
  382. call do_page_fault
  383. br ret_from_exception
  384. protection_exception_ba:
  385. rdctl r6, badaddr
  386. call do_page_fault
  387. br ret_from_exception
  388. protection_exception_instr:
  389. call handle_supervisor_instr
  390. br ret_from_exception
  391. handle_breakpoint:
  392. call breakpoint_c
  393. br ret_from_exception
  394. #ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
  395. handle_unaligned:
  396. SAVE_SWITCH_STACK
  397. call handle_unaligned_c
  398. RESTORE_SWITCH_STACK
  399. br ret_from_exception
  400. #else
  401. handle_unaligned:
  402. call handle_unaligned_c
  403. br ret_from_exception
  404. #endif
  405. handle_illegal:
  406. call handle_illegal_c
  407. br ret_from_exception
  408. handle_diverror:
  409. call handle_diverror_c
  410. br ret_from_exception
  411. /*
  412. * Beware - when entering resume, prev (the current task) is
  413. * in r4, next (the new task) is in r5, don't change these
  414. * registers.
  415. */
  416. ENTRY(resume)
  417. rdctl r7, status /* save thread status reg */
  418. stw r7, TASK_THREAD + THREAD_KPSR(r4)
  419. andi r7, r7, %lo(~STATUS_PIE) /* disable interrupts */
  420. wrctl status, r7
  421. SAVE_SWITCH_STACK
  422. stw sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
  423. ldw sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
  424. movia r24, _current_thread /* save thread */
  425. GET_THREAD_INFO r1
  426. stw r1, 0(r24)
  427. RESTORE_SWITCH_STACK
  428. ldw r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
  429. wrctl status, r7
  430. ret
  431. ENTRY(ret_from_fork)
  432. call schedule_tail
  433. br ret_from_exception
  434. ENTRY(ret_from_kernel_thread)
  435. call schedule_tail
  436. mov r4,r17 /* arg */
  437. callr r16 /* function */
  438. br ret_from_exception
  439. /*
  440. * Kernel user helpers.
  441. *
  442. * Each segment is 64-byte aligned and will be mapped to the <User space>.
  443. * New segments (if ever needed) must be added after the existing ones.
  444. * This mechanism should be used only for things that are really small and
  445. * justified, and not be abused freely.
  446. *
  447. */
  448. /* Filling pads with undefined instructions. */
  449. .macro kuser_pad sym size
  450. .if ((. - \sym) & 3)
  451. .rept (4 - (. - \sym) & 3)
  452. .byte 0
  453. .endr
  454. .endif
  455. .rept ((\size - (. - \sym)) / 4)
  456. .word 0xdeadbeef
  457. .endr
  458. .endm
  459. .align 6
  460. .globl __kuser_helper_start
  461. __kuser_helper_start:
  462. __kuser_helper_version: /* @ 0x1000 */
  463. .word ((__kuser_helper_end - __kuser_helper_start) >> 6)
  464. __kuser_cmpxchg: /* @ 0x1004 */
  465. /*
  466. * r4 pointer to exchange variable
  467. * r5 old value
  468. * r6 new value
  469. */
  470. cmpxchg_ldw:
  471. ldw r2, 0(r4) /* load current value */
  472. sub r2, r2, r5 /* compare with old value */
  473. bne r2, zero, cmpxchg_ret
  474. /* We had a match, store the new value */
  475. cmpxchg_stw:
  476. stw r6, 0(r4)
  477. cmpxchg_ret:
  478. ret
  479. kuser_pad __kuser_cmpxchg, 64
  480. .globl __kuser_sigtramp
  481. __kuser_sigtramp:
  482. movi r2, __NR_rt_sigreturn
  483. trap
  484. kuser_pad __kuser_sigtramp, 64
  485. .globl __kuser_helper_end
  486. __kuser_helper_end: