#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
                                    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss);
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
                                     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
        /*
         * If we switch to a stack that has a top-level paging entry
         * that is not present in the current mm, the resulting #PF
         * will be promoted to a double-fault and we'll panic.  Probe
         * the new stack now so that vmalloc_fault can fix up the page
         * tables if needed.  This can only happen if we use a stack
         * in vmap space.
         *
         * We assume that the stack is aligned so that it never spans
         * more than one top-level paging entry.
         *
         * To minimize cache pollution, just follow the stack pointer.
         */
        READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
asmlinkage void ret_from_fork(void);

/* data that is pointed to by thread.sp */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
#else
        unsigned long si;
        unsigned long di;
#endif
        unsigned long bx;
        unsigned long bp;
        unsigned long ret_addr;
};

struct fork_frame {
        struct inactive_task_frame frame;
        struct pt_regs regs;
};
#define switch_to(prev, next, last)                                    \
do {                                                                   \
        prepare_switch_to(prev, next);                                 \
                                                                       \
        ((last) = __switch_to_asm((prev), (next)));                    \
} while (0)

#endif /* _ASM_X86_SWITCH_TO_H */
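
The comment in prepare_switch_to() above describes a pre-faulting trick: touch one byte of the next task's vmap'ed stack now, so that any missing top-level page-table entry is fixed up by an ordinary page fault instead of a double fault once that stack is live. The same "probe a byte to take the fault at a convenient time" idea can be shown in a self-contained user-space analogue; the snippet below is only an illustration, not kernel code, and the mapping size and variable names are arbitrary choices.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 64 * 1024;         /* arbitrary demo size */
        unsigned char *region = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }

        /*
         * Probe one byte of the fresh mapping: the volatile read forces the
         * access, so the page fault (and any page-table work) happens here,
         * before any fault-sensitive work begins.
         */
        (void)*(volatile unsigned char *)(region + len - 1);

        /* ... fault-sensitive work would run here ... */

        munmap(region, len);
        return 0;
}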
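struct inactive_task_frame above describes the words sitting at thread.sp for a task that is not running: the callee-saved registers saved by the switch code, followed by ret_addr, the return address the switch path eventually returns through (for a freshly forked task this is set up to be ret_from_fork). The stand-alone program below, a sketch assuming x86-64 where unsigned long is 8 bytes, just copies the 64-bit variant of the struct and prints its layout, showing that r15 sits at offset 0 (where thread.sp points) and ret_addr at the highest offset.

#include <stdio.h>
#include <stddef.h>

struct inactive_task_frame {
        unsigned long r15;
        unsigned long r14;
        unsigned long r13;
        unsigned long r12;
        unsigned long bx;
        unsigned long bp;
        unsigned long ret_addr;
};

int main(void)
{
#define SHOW(m) printf("%-8s offset %2zu\n", #m, \
                       offsetof(struct inactive_task_frame, m))
        SHOW(r15); SHOW(r14); SHOW(r13); SHOW(r12);
        SHOW(bx);  SHOW(bp);  SHOW(ret_addr);
#undef SHOW
        printf("sizeof   %zu\n", sizeof(struct inactive_task_frame));
        return 0;
}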
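Finally, the switch_to() macro expands to little more than last = __switch_to_asm(prev, next): the assembly helper swaps stacks, the C-level __switch_to() finishes the job, and its return value, the task that was just switched away from, lands in last once this code path runs again. The mock below only demonstrates that plumbing: struct task_struct here is a hypothetical two-field stand-in, and the stub __switch_to_asm does not switch stacks at all, it merely returns its first argument the way the real switch path hands back the previous task.

#include <stdio.h>

struct task_struct {
        const char *comm;       /* stand-in for the task name */
};

/* Stub: the real one saves/restores callee-saved registers and stacks. */
static struct task_struct *__switch_to_asm(struct task_struct *prev,
                                           struct task_struct *next)
{
        (void)next;
        return prev;
}

static void prepare_switch_to(struct task_struct *prev,
                              struct task_struct *next)
{
        (void)prev;
        (void)next;             /* the real one probes next->thread.sp */
}

/* The macro, as defined in the header above. */
#define switch_to(prev, next, last)                             \
do {                                                            \
        prepare_switch_to(prev, next);                          \
                                                                \
        ((last) = __switch_to_asm((prev), (next)));             \
} while (0)

int main(void)
{
        struct task_struct a = { "prev-task" };
        struct task_struct b = { "next-task" };
        struct task_struct *last;

        switch_to(&a, &b, last);
        printf("last = %s\n", last->comm);      /* prints "prev-task" */
        return 0;
}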