entry.S

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
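
/*
 * kernel_entry and kernel_exit below are a matched pair: kernel_entry
 * builds a struct pt_regs frame on the kernel stack (x0-x29 pushed in
 * pairs, then lr/sp and elr/spsr), and kernel_exit unwinds it before
 * eret. The frame layout must match the S_* offsets generated by
 * asm-offsets.c.
 */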
	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm
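
/*
 * Note on the \ret argument above: on the fast syscall return path x0
 * already holds the syscall return value and must not be reloaded from
 * the frame, so only x1 is restored individually and sp is advanced
 * past the x0/x1 slots (S_X2) instead of popping the pair.
 */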

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
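
/*
 * All four aliases live in callee-saved registers (x25-x28 under the
 * AAPCS64), so they survive the calls into C code made while handling
 * the syscall.
 */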

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */
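/*
 * The table has four groups of four entries: exceptions taken from the
 * current EL with SP_EL0, from the current EL with SP_ELx, from a
 * lower EL in AArch64 and from a lower EL in AArch32. Each ventry slot
 * is 128 bytes, and the base must be 2KB-aligned (hence .align 11);
 * VBAR_EL1 is pointed at "vectors" during boot.
 */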
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling. The debug exception classes taken
	 * from the current EL (hardware breakpoint, step, watchpoint)
	 * all have bit 0 of the EC set; BRK64 is the odd one out, so
	 * treat it as if bit 0 were set and route everything else to
	 * el1_inv.
	 */
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
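/*
 * Loop calling preempt_schedule_irq() for as long as TIF_NEED_RESCHED
 * is still set on return: another task may have set it again while we
 * were away. lr is stashed in x24 (callee-saved) across the calls.
 */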
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)		// clear the address tag (TBI byte)
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	b	ret_to_user
el0_inv:
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
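/*
 * The stp/ldp sequence below mirrors struct cpu_context (x19-x28, fp,
 * sp, pc); THREAD_CPU_CONTEXT is the offset of thread.cpu_context
 * within task_struct, generated by asm-offsets.c.
 */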
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork. For a new kernel thread, x19
 * holds the thread function and x20 its argument (set up by
 * copy_thread()); for a user task x19 is zero and we head straight
 * back to userspace.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
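/*
 * The syscall number arrives in w8 for AArch64 tasks (w7/r7 for
 * compat, see el0_svc_compat above). The syscall tables are page
 * aligned, which is what lets a bare adrp load their address without
 * a :lo12: low-half add.
 */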
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

/*
 * This is the really slow path. We're going to be doing context
 * switches, and waiting for our parent (the tracer) to respond.
 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
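
/*
 * Pointer to the root interrupt handler: filled in at boot by
 * set_handle_irq() and loaded indirectly by the irq_handler macro
 * above.
 */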
ENTRY(handle_arch_irq)
	.quad	0