@@ -116,7 +116,7 @@
 	 */
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
@@ -143,11 +143,7 @@ alternative_endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -610,22 +606,21 @@ ENDPROC(cpu_switch_to)
  */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
 ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -649,7 +644,7 @@ ret_to_user:
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
 no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*