arch/x86/include/asm/switch_to.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};

struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
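
/*
 * For reference (a sketch, abridged from copy_thread() in
 * arch/x86/kernel/process_64.c around this kernel version): a child's
 * thread.sp is pointed at a fork_frame built at the top of its stack,
 * so the first __switch_to_asm() into the child pops the callee-saved
 * registers and "returns" to ret_from_fork():
 *
 *	fork_frame = container_of(childregs, struct fork_frame, regs);
 *	frame = &fork_frame->frame;
 *	frame->bp = 0;
 *	frame->ret_addr = (unsigned long) ret_from_fork;
 *	p->thread.sp = (unsigned long) fork_frame;
 */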
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
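
/*
 * Typical call site (a sketch, abridged from context_switch() in
 * kernel/sched/core.c):
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 *
 * "last" is assigned after the stack switch: by the time the old task's
 * stack runs again, other tasks may have run on this CPU, so the task we
 * actually switched away from is handed back via __switch_to_asm()'s
 * return value rather than read from the stale local "prev".
 */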
#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
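
/*
 * Usage note (an assumption, based on the 32-bit __switch_to() in
 * arch/x86/kernel/process_32.c): this is called on every task switch,
 * roughly as:
 *
 *	refresh_sysenter_cs(&next_p->thread);
 *
 * The percpu ss1 comparison above makes the expensive WRMSR rare:
 * thread->sysenter_cs only changes when a task enters or leaves vm86
 * mode.
 */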
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}
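
/*
 * Rationale for the XENPV branch (an assumption, based on the entry
 * trampoline design): native 64-bit kernels always enter on the per-CPU
 * trampoline stack, so sp0 is constant.  Xen PV guests bypass the
 * trampoline and the hypervisor delivers events on the stack named by
 * sp0, so it must follow the current task's stack.
 */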
#endif /* _ASM_X86_SWITCH_TO_H */