/* entry.S */
  1. /*
  2. * Copyright (C) 2012 Regents of the University of California
  3. * Copyright (C) 2017 SiFive
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License
  7. * as published by the Free Software Foundation, version 2.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/init.h>
  15. #include <linux/linkage.h>
  16. #include <asm/asm.h>
  17. #include <asm/csr.h>
  18. #include <asm/unistd.h>
  19. #include <asm/thread_info.h>
  20. #include <asm/asm-offsets.h>
  21. .text
  22. .altmacro
  23. /*
  24. * Prepares to enter a system call or exception by saving all registers to the
  25. * stack.
  26. */
  27. .macro SAVE_ALL
  28. LOCAL _restore_kernel_tpsp
  29. LOCAL _save_context
  30. /*
  31. * If coming from userspace, preserve the user thread pointer and load
  32. * the kernel thread pointer. If we came from the kernel, sscratch
  33. * will contain 0, and we should continue on the current TP.
  34. */
  35. csrrw tp, sscratch, tp
  36. bnez tp, _save_context
  37. _restore_kernel_tpsp:
  38. csrr tp, sscratch
  39. REG_S sp, TASK_TI_KERNEL_SP(tp)
  40. _save_context:
  41. REG_S sp, TASK_TI_USER_SP(tp)
  42. REG_L sp, TASK_TI_KERNEL_SP(tp)
  43. addi sp, sp, -(PT_SIZE_ON_STACK)
  44. REG_S x1, PT_RA(sp)
  45. REG_S x3, PT_GP(sp)
  46. REG_S x5, PT_T0(sp)
  47. REG_S x6, PT_T1(sp)
  48. REG_S x7, PT_T2(sp)
  49. REG_S x8, PT_S0(sp)
  50. REG_S x9, PT_S1(sp)
  51. REG_S x10, PT_A0(sp)
  52. REG_S x11, PT_A1(sp)
  53. REG_S x12, PT_A2(sp)
  54. REG_S x13, PT_A3(sp)
  55. REG_S x14, PT_A4(sp)
  56. REG_S x15, PT_A5(sp)
  57. REG_S x16, PT_A6(sp)
  58. REG_S x17, PT_A7(sp)
  59. REG_S x18, PT_S2(sp)
  60. REG_S x19, PT_S3(sp)
  61. REG_S x20, PT_S4(sp)
  62. REG_S x21, PT_S5(sp)
  63. REG_S x22, PT_S6(sp)
  64. REG_S x23, PT_S7(sp)
  65. REG_S x24, PT_S8(sp)
  66. REG_S x25, PT_S9(sp)
  67. REG_S x26, PT_S10(sp)
  68. REG_S x27, PT_S11(sp)
  69. REG_S x28, PT_T3(sp)
  70. REG_S x29, PT_T4(sp)
  71. REG_S x30, PT_T5(sp)
  72. REG_S x31, PT_T6(sp)
  73. /*
  74. * Disable user-mode memory access as it should only be set in the
  75. * actual user copy routines.
  76. *
  77. * Disable the FPU to detect illegal usage of floating point in kernel
  78. * space.
  79. */
  80. li t0, SR_SUM | SR_FS
  81. REG_L s0, TASK_TI_USER_SP(tp)
  82. csrrc s1, sstatus, t0
  83. csrr s2, sepc
  84. csrr s3, sbadaddr
  85. csrr s4, scause
  86. csrr s5, sscratch
  87. REG_S s0, PT_SP(sp)
  88. REG_S s1, PT_SSTATUS(sp)
  89. REG_S s2, PT_SEPC(sp)
  90. REG_S s3, PT_SBADADDR(sp)
  91. REG_S s4, PT_SCAUSE(sp)
  92. REG_S s5, PT_TP(sp)
  93. .endm
  94. /*
  95. * Prepares to return from a system call or exception by restoring all
  96. * registers from the stack.
  97. */
  98. .macro RESTORE_ALL
  99. REG_L a0, PT_SSTATUS(sp)
  100. REG_L a2, PT_SEPC(sp)
  101. csrw sstatus, a0
  102. csrw sepc, a2
  103. REG_L x1, PT_RA(sp)
  104. REG_L x3, PT_GP(sp)
  105. REG_L x4, PT_TP(sp)
  106. REG_L x5, PT_T0(sp)
  107. REG_L x6, PT_T1(sp)
  108. REG_L x7, PT_T2(sp)
  109. REG_L x8, PT_S0(sp)
  110. REG_L x9, PT_S1(sp)
  111. REG_L x10, PT_A0(sp)
  112. REG_L x11, PT_A1(sp)
  113. REG_L x12, PT_A2(sp)
  114. REG_L x13, PT_A3(sp)
  115. REG_L x14, PT_A4(sp)
  116. REG_L x15, PT_A5(sp)
  117. REG_L x16, PT_A6(sp)
  118. REG_L x17, PT_A7(sp)
  119. REG_L x18, PT_S2(sp)
  120. REG_L x19, PT_S3(sp)
  121. REG_L x20, PT_S4(sp)
  122. REG_L x21, PT_S5(sp)
  123. REG_L x22, PT_S6(sp)
  124. REG_L x23, PT_S7(sp)
  125. REG_L x24, PT_S8(sp)
  126. REG_L x25, PT_S9(sp)
  127. REG_L x26, PT_S10(sp)
  128. REG_L x27, PT_S11(sp)
  129. REG_L x28, PT_T3(sp)
  130. REG_L x29, PT_T4(sp)
  131. REG_L x30, PT_T5(sp)
  132. REG_L x31, PT_T6(sp)
  133. REG_L x2, PT_SP(sp)
  134. .endm
  135. ENTRY(handle_exception)
  136. SAVE_ALL
  137. /*
  138. * Set sscratch register to 0, so that if a recursive exception
  139. * occurs, the exception vector knows it came from the kernel
  140. */
  141. csrw sscratch, x0
  142. /* Load the global pointer */
  143. .option push
  144. .option norelax
  145. la gp, __global_pointer$
  146. .option pop
  147. la ra, ret_from_exception
  148. /*
  149. * MSB of cause differentiates between
  150. * interrupts and exceptions
  151. */
  152. bge s4, zero, 1f
  153. /* Handle interrupts */
  154. move a0, sp /* pt_regs */
  155. tail do_IRQ
  156. 1:
  157. /* Exceptions run with interrupts enabled */
  158. csrs sstatus, SR_SIE
  159. /* Handle syscalls */
  160. li t0, EXC_SYSCALL
  161. beq s4, t0, handle_syscall
  162. /* Handle other exceptions */
  163. slli t0, s4, RISCV_LGPTR
  164. la t1, excp_vect_table
  165. la t2, excp_vect_table_end
  166. move a0, sp /* pt_regs */
  167. add t0, t1, t0
  168. /* Check if exception code lies within bounds */
  169. bgeu t0, t2, 1f
  170. REG_L t0, 0(t0)
  171. jr t0
  172. 1:
  173. tail do_trap_unknown
  174. handle_syscall:
  175. /* save the initial A0 value (needed in signal handlers) */
  176. REG_S a0, PT_ORIG_A0(sp)
  177. /*
  178. * Advance SEPC to avoid executing the original
  179. * scall instruction on sret
  180. */
  181. addi s2, s2, 0x4
  182. REG_S s2, PT_SEPC(sp)
  183. /* Trace syscalls, but only if requested by the user. */
  184. REG_L t0, TASK_TI_FLAGS(tp)
  185. andi t0, t0, _TIF_SYSCALL_TRACE
  186. bnez t0, handle_syscall_trace_enter
  187. check_syscall_nr:
  188. /* Check to make sure we don't jump to a bogus syscall number. */
  189. li t0, __NR_syscalls
  190. la s0, sys_ni_syscall
  191. /* Syscall number held in a7 */
  192. bgeu a7, t0, 1f
  193. la s0, sys_call_table
  194. slli t0, a7, RISCV_LGPTR
  195. add s0, s0, t0
  196. REG_L s0, 0(s0)
  197. 1:
  198. jalr s0
  199. ret_from_syscall:
  200. /* Set user a0 to kernel a0 */
  201. REG_S a0, PT_A0(sp)
  202. /* Trace syscalls, but only if requested by the user. */
  203. REG_L t0, TASK_TI_FLAGS(tp)
  204. andi t0, t0, _TIF_SYSCALL_TRACE
  205. bnez t0, handle_syscall_trace_exit
  206. ret_from_exception:
  207. REG_L s0, PT_SSTATUS(sp)
  208. csrc sstatus, SR_SIE
  209. andi s0, s0, SR_SPP
  210. bnez s0, restore_all
  211. resume_userspace:
  212. /* Interrupts must be disabled here so flags are checked atomically */
  213. REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
  214. andi s1, s0, _TIF_WORK_MASK
  215. bnez s1, work_pending
  216. /* Save unwound kernel stack pointer in thread_info */
  217. addi s0, sp, PT_SIZE_ON_STACK
  218. REG_S s0, TASK_TI_KERNEL_SP(tp)
  219. /*
  220. * Save TP into sscratch, so we can find the kernel data structures
  221. * again.
  222. */
  223. csrw sscratch, tp
  224. restore_all:
  225. RESTORE_ALL
  226. sret
  227. work_pending:
  228. /* Enter slow path for supplementary processing */
  229. la ra, ret_from_exception
  230. andi s1, s0, _TIF_NEED_RESCHED
  231. bnez s1, work_resched
  232. work_notifysig:
  233. /* Handle pending signals and notify-resume requests */
  234. csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
  235. move a0, sp /* pt_regs */
  236. move a1, s0 /* current_thread_info->flags */
  237. tail do_notify_resume
  238. work_resched:
  239. tail schedule
  240. /* Slow paths for ptrace. */
  241. handle_syscall_trace_enter:
  242. move a0, sp
  243. call do_syscall_trace_enter
  244. REG_L a0, PT_A0(sp)
  245. REG_L a1, PT_A1(sp)
  246. REG_L a2, PT_A2(sp)
  247. REG_L a3, PT_A3(sp)
  248. REG_L a4, PT_A4(sp)
  249. REG_L a5, PT_A5(sp)
  250. REG_L a6, PT_A6(sp)
  251. REG_L a7, PT_A7(sp)
  252. j check_syscall_nr
  253. handle_syscall_trace_exit:
  254. move a0, sp
  255. call do_syscall_trace_exit
  256. j ret_from_exception
  257. END(handle_exception)
  258. ENTRY(ret_from_fork)
  259. la ra, ret_from_exception
  260. tail schedule_tail
  261. ENDPROC(ret_from_fork)
  262. ENTRY(ret_from_kernel_thread)
  263. call schedule_tail
  264. /* Call fn(arg) */
  265. la ra, ret_from_exception
  266. move a0, s1
  267. jr s0
  268. ENDPROC(ret_from_kernel_thread)
  269. /*
  270. * Integer register context switch
  271. * The callee-saved registers must be saved and restored.
  272. *
  273. * a0: previous task_struct (must be preserved across the switch)
  274. * a1: next task_struct
  275. *
  276. * The value of a0 and a1 must be preserved by this function, as that's how
  277. * arguments are passed to schedule_tail.
  278. */
  279. ENTRY(__switch_to)
  280. /* Save context into prev->thread */
  281. li a4, TASK_THREAD_RA
  282. add a3, a0, a4
  283. add a4, a1, a4
  284. REG_S ra, TASK_THREAD_RA_RA(a3)
  285. REG_S sp, TASK_THREAD_SP_RA(a3)
  286. REG_S s0, TASK_THREAD_S0_RA(a3)
  287. REG_S s1, TASK_THREAD_S1_RA(a3)
  288. REG_S s2, TASK_THREAD_S2_RA(a3)
  289. REG_S s3, TASK_THREAD_S3_RA(a3)
  290. REG_S s4, TASK_THREAD_S4_RA(a3)
  291. REG_S s5, TASK_THREAD_S5_RA(a3)
  292. REG_S s6, TASK_THREAD_S6_RA(a3)
  293. REG_S s7, TASK_THREAD_S7_RA(a3)
  294. REG_S s8, TASK_THREAD_S8_RA(a3)
  295. REG_S s9, TASK_THREAD_S9_RA(a3)
  296. REG_S s10, TASK_THREAD_S10_RA(a3)
  297. REG_S s11, TASK_THREAD_S11_RA(a3)
  298. /* Restore context from next->thread */
  299. REG_L ra, TASK_THREAD_RA_RA(a4)
  300. REG_L sp, TASK_THREAD_SP_RA(a4)
  301. REG_L s0, TASK_THREAD_S0_RA(a4)
  302. REG_L s1, TASK_THREAD_S1_RA(a4)
  303. REG_L s2, TASK_THREAD_S2_RA(a4)
  304. REG_L s3, TASK_THREAD_S3_RA(a4)
  305. REG_L s4, TASK_THREAD_S4_RA(a4)
  306. REG_L s5, TASK_THREAD_S5_RA(a4)
  307. REG_L s6, TASK_THREAD_S6_RA(a4)
  308. REG_L s7, TASK_THREAD_S7_RA(a4)
  309. REG_L s8, TASK_THREAD_S8_RA(a4)
  310. REG_L s9, TASK_THREAD_S9_RA(a4)
  311. REG_L s10, TASK_THREAD_S10_RA(a4)
  312. REG_L s11, TASK_THREAD_S11_RA(a4)
  313. /* Swap the CPU entry around. */
  314. lw a3, TASK_TI_CPU(a0)
  315. lw a4, TASK_TI_CPU(a1)
  316. sw a3, TASK_TI_CPU(a1)
  317. sw a4, TASK_TI_CPU(a0)
  318. #if TASK_TI != 0
  319. #error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
  320. addi tp, a1, TASK_TI
  321. #else
  322. move tp, a1
  323. #endif
  324. ret
  325. ENDPROC(__switch_to)
  326. .section ".rodata"
  327. /* Exception vector table */
  328. ENTRY(excp_vect_table)
  329. RISCV_PTR do_trap_insn_misaligned
  330. RISCV_PTR do_trap_insn_fault
  331. RISCV_PTR do_trap_insn_illegal
  332. RISCV_PTR do_trap_break
  333. RISCV_PTR do_trap_load_misaligned
  334. RISCV_PTR do_trap_load_fault
  335. RISCV_PTR do_trap_store_misaligned
  336. RISCV_PTR do_trap_store_fault
  337. RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
  338. RISCV_PTR do_trap_ecall_s
  339. RISCV_PTR do_trap_unknown
  340. RISCV_PTR do_trap_ecall_m
  341. RISCV_PTR do_page_fault /* instruction page fault */
  342. RISCV_PTR do_page_fault /* load page fault */
  343. RISCV_PTR do_trap_unknown
  344. RISCV_PTR do_page_fault /* store page fault */
  345. excp_vect_table_end:
  346. END(excp_vect_table)