@@ -90,9 +90,8 @@
 
 	.if	\el == 0
 	mrs	x21, sp_el0
-	mov	tsk, sp
-	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
-	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
+	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
+	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
 	disable_step_tsk x19, x20		// exceptions when scheduling.
 
 	mov	x29, xzr			// fp pointed to user-space
@@ -100,10 +99,10 @@
 	add	x21, sp, #S_FRAME_SIZE
 	get_thread_info tsk
 	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
-	ldr	x20, [tsk, #TI_ADDR_LIMIT]
+	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
 	mov	x20, #TASK_SIZE_64
-	str	x20, [tsk, #TI_ADDR_LIMIT]
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
@@ -139,7 +138,7 @@
 	.if	\el != 0
 	/* Restore the task's original addr_limit. */
 	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	str	x20, [tsk, #TI_ADDR_LIMIT]
+	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 
 	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 	.endif
@@ -192,13 +191,14 @@ alternative_else_nop_endif
 	mov	x19, sp			// preserve the original sp
 
 	/*
-	 * Compare sp with the current thread_info, if the top
-	 * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
-	 * should switch to the irq stack.
+	 * Compare sp with the base of the task stack.
+	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+	 * and should switch to the irq stack.
 	 */
-	and	x25, x19, #~(THREAD_SIZE - 1)
-	cmp	x25, tsk
-	b.ne	9998f
+	ldr	x25, [tsk, TSK_STACK]
+	eor	x25, x25, x19
+	and	x25, x25, #~(THREAD_SIZE - 1)
+	cbnz	x25, 9998f
 
 	adr_this_cpu x25, irq_stack, x26
 	mov	x26, #IRQ_STACK_START_SP
@@ -427,9 +427,9 @@ el1_irq:
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 	cbnz	w24, 1f				// preempt count != 0
-	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
 1:
@@ -444,7 +444,7 @@ ENDPROC(el1_irq)
 el1_preempt:
 	mov	x24, lr
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
-	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
 	ret	x24
 #endif
@@ -674,8 +674,7 @@ ENTRY(cpu_switch_to)
 	ldp	x29, x9, [x8], #16
 	ldr	lr, [x8]
 	mov	sp, x9
-	and	x9, x9, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x9
+	msr	sp_el0, x1
 	ret
 ENDPROC(cpu_switch_to)
 
@@ -686,7 +685,7 @@ ENDPROC(cpu_switch_to)
 ret_fast_syscall:
 	disable_irq				// disable interrupts
 	str	x0, [sp, #S_X0]			// returned x0
-	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
@@ -706,14 +705,14 @@ work_pending:
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on		// enabled while in userspace
 #endif
-	ldr	x1, [tsk, #TI_FLAGS]		// re-check for single-step
+	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 	b	finish_ret_to_user
 /*
  * "slow" syscall return path.
  */
 ret_to_user:
 	disable_irq				// disable interrupts
-	ldr	x1, [tsk, #TI_FLAGS]
+	ldr	x1, [tsk, #TSK_TI_FLAGS]
 	and	x2, x1, #_TIF_WORK_MASK
 	cbnz	x2, work_pending
 finish_ret_to_user:
@@ -746,7 +745,7 @@ el0_svc_naked:	// compat entry point
 	enable_dbg_and_irq
 	ct_user_exit 1
 
-	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
+	ldr	x16, [tsk, #TSK_TI_FLAGS]	// check for syscall hooks
 	tst	x16, #_TIF_SYSCALL_WORK
 	b.ne	__sys_trace
 	cmp     scno, sc_nr			// check upper syscall limit
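
Illustration only, not part of the patch: the new ldr/eor/and/cbnz sequence in the irq-stack hunk tests whether sp and the task's stack base lie in the same THREAD_SIZE-sized, THREAD_SIZE-aligned region. A minimal standalone C sketch of that test follows, assuming a power-of-two THREAD_SIZE of 16 KiB and a hypothetical, suitably aligned base address.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE (16UL * 1024)	/* assumed power-of-two stack size */

	/*
	 * sp lies on the task stack iff sp and the stack base agree in every
	 * bit above the THREAD_SIZE offset bits; this assumes stacks are
	 * THREAD_SIZE-aligned, so a stack never straddles two such regions.
	 */
	static bool on_task_stack(uintptr_t sp, uintptr_t stack_base)
	{
		return ((sp ^ stack_base) & ~(THREAD_SIZE - 1)) == 0;
	}

	int main(void)
	{
		uintptr_t base = 0xffff000012340000UL;	/* hypothetical aligned base */

		printf("%d\n", on_task_stack(base + 0x1f0, base));		/* 1: same stack */
		printf("%d\n", on_task_stack(base + THREAD_SIZE + 8, base));	/* 0: different region */
		return 0;
	}

Reading the base from TSK_STACK instead of masking sp means the test no longer relies on thread_info living at the bottom of the stack, which is the point of the wider change.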