entry.S

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm
/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
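
	/*
	 * Note: the pushes above build a struct pt_regs on the kernel
	 * stack: x0-x29 at ascending offsets from sp, then the lr/sp
	 * pair at S_LR and the elr/spsr pair at S_PC, with orig_x0 and
	 * syscallno at the end of the frame (the S_* offsets are
	 * generated from the pt_regs layout by asm-offsets.c).
	 */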
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm
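
	/*
	 * Note: with \ret == 1 the x0/x1 slots are skipped rather than
	 * popped, so the syscall return value already in x0 survives;
	 * only x1 is reloaded before sp is advanced past S_X2. eret
	 * then restores the PC from ELR_EL1 and PSTATE from SPSR_EL1.
	 */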
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
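
	/*
	 * Note: this works because thread_info sits at the base of the
	 * THREAD_SIZE-aligned kernel stack, so clearing the low bits of
	 * the current stack pointer lands on it.
	 */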
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
	.text

/*
 * Exception vectors.
 */

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
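
	/*
	 * Note: VBAR_EL1 requires 2KB alignment, hence the .align 11.
	 * Each ventry expands into a 0x80-byte vector slot, giving the
	 * architected 16-entry layout: four exception types (sync, IRQ,
	 * FIQ, SError) for each of EL1t, EL1h, 64-bit EL0 and 32-bit EL0.
	 */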
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
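
	/*
	 * Note: the final b.ge works because the debug exception
	 * classes occupy the top of the EC encoding space, so any
	 * class >= ESR_EL1_EC_BREAKPT_EL1 is some flavour of debug
	 * exception (breakpoint, software step, watchpoint or BRK).
	 */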
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1

el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort

el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling. The debug ECs have bit 0 set when
	 * taken from the current EL, except BRK64 which uses a single
	 * class; force bit 0 for BRK64 so it passes the same-EL check.
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1

el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
	// loop: interrupts handled inside preempt_schedule_irq may set
	// TIF_NEED_RESCHED again before we get back here
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
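
	/*
	 * Note: unlike the EL1 path, the whole syndrome is kept in the
	 * callee-saved x25 (and the class in x24) so it survives the C
	 * calls made by ct_user_exit and can still be passed on to the
	 * fault handlers below.
	 */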
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked
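
	/*
	 * Note: AArch32 r7 is visible here as w7, matching the EABI
	 * syscall convention; only the table pointer, number register
	 * and upper limit differ from the 64-bit path, so the tail is
	 * shared with el0_svc_naked below.
	 */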
	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	bic	x0, x26, #(0xff << 56)		// clear the tag byte: Top Byte
						// Ignore lets user pointers
						// carry a tag in bits 63:56
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_acc

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	adr	lr, ret_to_user
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	adr	lr, ret_to_user
	b	do_sp_pc_abort

el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	adr	lr, ret_to_user
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling. Bit 0 of the EC is clear for debug
	 * exceptions taken from a lower EL, so a set bit here means the
	 * entry is bogus.
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user

el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	adr	lr, ret_to_user
	b	bad_mode
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
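
	/*
	 * Note: the two blocks above fill and drain the cpu_context
	 * slot in thread_struct (x19-x28, fp, sp, pc). Restoring lr
	 * and then executing ret makes the next task resume wherever
	 * it last called cpu_switch_to, typically inside __switch_to().
	 */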
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1
/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule
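
	/*
	 * Note: schedule returns here and execution falls through into
	 * ret_to_user below, which re-reads the work flags; pending
	 * signals or a fresh need_resched are therefore not missed.
	 */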
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
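
	/*
	 * Note: x19/x20 are set up by copy_thread(): for a kernel
	 * thread x19 holds the thread function and x20 its argument;
	 * for a user fork x19 is zero, so we skip straight to
	 * ret_to_user.
	 */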
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address: the sys_*
						// routine returns straight
						// into the fast exit path
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user
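
	/*
	 * Note: syscall_trace_enter may rewrite both the syscall number
	 * (returned in w0) and its arguments, so the number is reloaded
	 * and re-checked against the limit, and x0-x7 are restored from
	 * the saved pt_regs before the table lookup.
	 */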
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
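
/*
 * Note: handle_arch_irq below is a function-pointer slot, not code;
 * it starts as zero and is presumably filled in at boot by the
 * interrupt controller setup (set_handle_irq) before the first IRQ
 * can be taken. The irq_handler macro above loads and calls it.
 */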
ENTRY(handle_arch_irq)
	.quad	0