@@ -2,8 +2,12 @@
 #define _ASM_X86_SWITCH_TO_H
 
 struct task_struct; /* one of the stranger aspects of C forward declarations */
+
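+/*
+ * The asm half of the switch (added to entry_32.S/entry_64.S in this
+ * same patch): it saves the callee-saved registers on the old task's
+ * stack, swaps stack pointers, and returns the previous task.
+ */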
+struct task_struct *__switch_to_asm(struct task_struct *prev,
+				    struct task_struct *next);
+
 __visible struct task_struct *__switch_to(struct task_struct *prev,
-				   struct task_struct *next);
+					  struct task_struct *next);
 struct tss_struct;
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss);
@@ -32,131 +36,30 @@ static inline void prepare_switch_to(struct task_struct *prev,
 
 /* data that is pointed to by thread.sp */
 struct inactive_task_frame {
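+	/*
+	 * This mirrors what __switch_to_asm() leaves on the stack:
+	 * the callee-saved registers (lowest address first), then the
+	 * return address that its final 'ret' will pop.
+	 */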
+#ifdef CONFIG_X86_64
+	unsigned long r15;
+	unsigned long r14;
+	unsigned long r13;
+	unsigned long r12;
+#else
+	unsigned long si;
+	unsigned long di;
+#endif
+	unsigned long bx;
 	unsigned long bp;
+	unsigned long ret_addr;
 };
 
-#ifdef CONFIG_X86_32
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary						\
-	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
-	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
-#define __switch_canary_oparam					\
-	, [stack_canary] "=m" (stack_canary.canary)
-#define __switch_canary_iparam					\
-	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else	/* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif	/* CC_STACKPROTECTOR */
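+/*
+ * A forked child's stack is laid out as a fork_frame: the embedded
+ * inactive_task_frame routes the child's first switch-in through
+ * ret_from_fork, with the child's pt_regs sitting just above it.
+ */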
+struct fork_frame {
+	struct inactive_task_frame frame;
+	struct pt_regs regs;
+};
 
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
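+/*
+ * switch_to() is now a thin wrapper: prepare, call the asm stub, and
+ * let __switch_to()'s return value (the task we switched away from)
+ * come back through __switch_to_asm() into 'last'.
+ */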
 #define switch_to(prev, next, last)					\
 do {									\
-	/*								\
-	 * Context-switching clobbers all registers, so we clobber	\
-	 * them explicitly, via unused output variables.		\
-	 * (EAX and EBP is not listed because EBP is saved/restored	\
-	 * explicitly for wchan access and EAX is the return value of	\
-	 * __switch_to())						\
-	 */								\
-	unsigned long ebx, ecx, edx, esi, edi;				\
-									\
 	prepare_switch_to(prev, next);					\
 									\
-	asm volatile("pushl %%ebp\n\t"		/* save    EBP   */	\
-		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
-		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
-		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
-		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
-		     __switch_canary					\
-		     "jmp __switch_to\n"	/* regparm call  */	\
-		     "1:\t"						\
-		     "popl %%ebp\n\t"		/* restore EBP   */	\
-									\
-		     /* output parameters */				\
-		     : [prev_sp] "=m" (prev->thread.sp),		\
-		       [prev_ip] "=m" (prev->thread.ip),		\
-		       "=a" (last),					\
-									\
-		       /* clobbered output registers: */		\
-		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
-		       "=S" (esi), "=D" (edi)				\
-									\
-		       __switch_canary_oparam				\
-									\
-		       /* input parameters: */				\
-		     : [next_sp]  "m" (next->thread.sp),		\
-		       [next_ip]  "m" (next->thread.ip),		\
-									\
-		       /* regparm parameters for __switch_to(): */	\
-		       [prev]     "a" (prev),				\
-		       [next]     "d" (next)				\
-									\
-		       __switch_canary_iparam				\
-									\
-		     : /* reloaded segment registers */			\
-			"memory");					\
+	((last) = __switch_to_asm((prev), (next)));			\
 } while (0)
 
-#else /* CONFIG_X86_32 */
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
-
-#define __EXTRA_CLOBBER  \
-	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary						  \
-	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
-	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
-#define __switch_canary_oparam					  \
-	, [gs_canary] "=m" (irq_stack_union.stack_canary)
-#define __switch_canary_iparam					  \
-	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else	/* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif	/* CC_STACKPROTECTOR */
-
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
-#define switch_to(prev, next, last) \
-	prepare_switch_to(prev, next); \
-	\
-	asm volatile(SAVE_CONTEXT					  \
-	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
-	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
-	     "call __switch_to\n\t"					  \
-	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
-	     __switch_canary						  \
-	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
-	     "movq %%rax,%%rdi\n\t" 					  \
-	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
-	     "jnz ret_from_fork\n\t"					  \
-	     RESTORE_CONTEXT						  \
-	     : "=a" (last)						  \
-	       __switch_canary_oparam					  \
-	     : [next] "S" (next), [prev] "D" (prev),			  \
-	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
-	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
-	       [_tif_fork] "i" (_TIF_FORK),				  \
-	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
-	       [current_task] "m" (current_task)			  \
-	       __switch_canary_iparam					  \
-	     : "memory", "cc" __EXTRA_CLOBBER)
-
-#endif /* CONFIG_X86_32 */
-
 #endif /* _ASM_X86_SWITCH_TO_H */