/* arch/sparc/include/asm/switch_to_32.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __SPARC_SWITCH_TO_H
  3. #define __SPARC_SWITCH_TO_H
  4. #include <asm/smp.h>
  5. extern struct thread_info *current_set[NR_CPUS];
  6. /*
  7. * Flush windows so that the VM switch which follows
  8. * would not pull the stack from under us.
  9. *
  10. * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
  11. * XXX WTF is the above comment? Found in late teen 2.4.x.
  12. */
  13. #ifdef CONFIG_SMP
  14. #define SWITCH_ENTER(prv) \
  15. do { \
  16. if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
  17. put_psr(get_psr() | PSR_EF); \
  18. fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
  19. &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
  20. clear_tsk_thread_flag(prv, TIF_USEDFPU); \
  21. (prv)->thread.kregs->psr &= ~PSR_EF; \
  22. } \
  23. } while(0)
  24. #define SWITCH_DO_LAZY_FPU(next) /* */
  25. #else
  26. #define SWITCH_ENTER(prv) /* */
  27. #define SWITCH_DO_LAZY_FPU(nxt) \
  28. do { \
  29. if (last_task_used_math != (nxt)) \
  30. (nxt)->thread.kregs->psr&=~PSR_EF; \
  31. } while(0)
  32. #endif
  33. #define prepare_arch_switch(next) do { \
  34. __asm__ __volatile__( \
  35. ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
  36. "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
  37. "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
  38. "save %sp, -0x40, %sp\n\t" \
  39. "restore; restore; restore; restore; restore; restore; restore"); \
  40. } while(0)
  41. /* Much care has gone into this code, do not touch it.
  42. *
  43. * We need to loadup regs l0/l1 for the newly forked child
  44. * case because the trap return path relies on those registers
  45. * holding certain values, gcc is told that they are clobbered.
  46. * Gcc needs registers for 3 values in and 1 value out, so we
  47. * clobber every non-fixed-usage register besides l2/l3/o4/o5. -DaveM
  48. *
  49. * Hey Dave, that do not touch sign is too much of an incentive
  50. * - Anton & Pete
  51. */
  52. #define switch_to(prev, next, last) do { \
  53. SWITCH_ENTER(prev); \
  54. SWITCH_DO_LAZY_FPU(next); \
  55. cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
  56. __asm__ __volatile__( \
  57. "sethi %%hi(here - 0x8), %%o7\n\t" \
  58. "mov %%g6, %%g3\n\t" \
  59. "or %%o7, %%lo(here - 0x8), %%o7\n\t" \
  60. "rd %%psr, %%g4\n\t" \
  61. "std %%sp, [%%g6 + %4]\n\t" \
  62. "rd %%wim, %%g5\n\t" \
  63. "wr %%g4, 0x20, %%psr\n\t" \
  64. "nop\n\t" \
  65. "std %%g4, [%%g6 + %3]\n\t" \
  66. "ldd [%2 + %3], %%g4\n\t" \
  67. "mov %2, %%g6\n\t" \
  68. ".globl patchme_store_new_current\n" \
  69. "patchme_store_new_current:\n\t" \
  70. "st %2, [%1]\n\t" \
  71. "wr %%g4, 0x20, %%psr\n\t" \
  72. "nop\n\t" \
  73. "nop\n\t" \
  74. "nop\n\t" /* LEON needs all 3 nops: load to %sp depends on CWP. */ \
  75. "ldd [%%g6 + %4], %%sp\n\t" \
  76. "wr %%g5, 0x0, %%wim\n\t" \
  77. "ldd [%%sp + 0x00], %%l0\n\t" \
  78. "ldd [%%sp + 0x38], %%i6\n\t" \
  79. "wr %%g4, 0x0, %%psr\n\t" \
  80. "nop\n\t" \
  81. "nop\n\t" \
  82. "jmpl %%o7 + 0x8, %%g0\n\t" \
  83. " ld [%%g3 + %5], %0\n\t" \
  84. "here:\n" \
  85. : "=&r" (last) \
  86. : "r" (&(current_set[hard_smp_processor_id()])), \
  87. "r" (task_thread_info(next)), \
  88. "i" (TI_KPSR), \
  89. "i" (TI_KSP), \
  90. "i" (TI_TASK) \
  91. : "g1", "g2", "g3", "g4", "g5", "g7", \
  92. "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
  93. "i0", "i1", "i2", "i3", "i4", "i5", \
  94. "o0", "o1", "o2", "o3", "o7"); \
  95. } while(0)
  96. void fpsave(unsigned long *fpregs, unsigned long *fsr,
  97. void *fpqueue, unsigned long *fpqdepth);
  98. void synchronize_user_stack(void);
  99. #endif /* __SPARC_SWITCH_TO_H */