@@ -710,38 +710,6 @@ el0_irq_naked:
 	b	ret_to_user
 ENDPROC(el0_irq)
 
-/*
- * Register switch for AArch64. The callee-saved registers need to be saved
- * and restored. On entry:
- *  x0 = previous task_struct (must be preserved across the switch)
- *  x1 = next task_struct
- * Previous and next are guaranteed not to be the same.
- *
- */
-ENTRY(cpu_switch_to)
-	mov	x10, #THREAD_CPU_CONTEXT
-	add	x8, x0, x10
-	mov	x9, sp
-	stp	x19, x20, [x8], #16		// store callee-saved registers
-	stp	x21, x22, [x8], #16
-	stp	x23, x24, [x8], #16
-	stp	x25, x26, [x8], #16
-	stp	x27, x28, [x8], #16
-	stp	x29, x9, [x8], #16
-	str	lr, [x8]
-	add	x8, x1, x10
-	ldp	x19, x20, [x8], #16		// restore callee-saved registers
-	ldp	x21, x22, [x8], #16
-	ldp	x23, x24, [x8], #16
-	ldp	x25, x26, [x8], #16
-	ldp	x27, x28, [x8], #16
-	ldp	x29, x9, [x8], #16
-	ldr	lr, [x8]
-	mov	sp, x9
-	msr	sp_el0, x1
-	ret
-ENDPROC(cpu_switch_to)
-
 /*
  * This is the fast syscall return path. We do as little as possible here,
  * and this includes saving x0 back into the kernel stack.
@@ -784,18 +752,6 @@ finish_ret_to_user:
 	kernel_exit 0
 ENDPROC(ret_to_user)
 
-/*
- * This is how we return from a fork.
- */
-ENTRY(ret_from_fork)
-	bl	schedule_tail
-	cbz	x19, 1f				// not a kernel thread
-	mov	x0, x20
-	blr	x19
-1:	get_thread_info tsk
-	b	ret_to_user
-ENDPROC(ret_from_fork)
-
 /*
  * SVC handler.
  */
@@ -869,3 +825,49 @@ ENTRY(sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ *  x0 = previous task_struct (must be preserved across the switch)
+ *  x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
+	mov	x9, sp
+	stp	x19, x20, [x8], #16		// store callee-saved registers
+	stp	x21, x22, [x8], #16
+	stp	x23, x24, [x8], #16
+	stp	x25, x26, [x8], #16
+	stp	x27, x28, [x8], #16
+	stp	x29, x9, [x8], #16
+	str	lr, [x8]
+	add	x8, x1, x10
+	ldp	x19, x20, [x8], #16		// restore callee-saved registers
+	ldp	x21, x22, [x8], #16
+	ldp	x23, x24, [x8], #16
+	ldp	x25, x26, [x8], #16
+	ldp	x27, x28, [x8], #16
+	ldp	x29, x9, [x8], #16
+	ldr	lr, [x8]
+	mov	sp, x9
+	msr	sp_el0, x1
+	ret
+ENDPROC(cpu_switch_to)
+NOKPROBE(cpu_switch_to)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bl	schedule_tail
+	cbz	x19, 1f				// not a kernel thread
+	mov	x0, x20
+	blr	x19
+1:	get_thread_info tsk
+	b	ret_to_user
+ENDPROC(ret_from_fork)
+NOKPROBE(ret_from_fork)