switch_to.h

/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SWITCH_TO_H
#define __ASM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/ptrace.h>

extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
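
/*
 * Check whether @fpc is a valid floating-point control word: save the
 * current FPC, try to load the new value and restore the original one.
 * An invalid value raises a specification exception that the exception
 * table entry turns into -EINVAL; 0 is returned if @fpc is accepted
 * (or on machines without the IEEE facility).
 */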
static inline int test_fp_ctl(u32 fpc)
{
	u32 orig_fpc;
	int rc;

	if (!MACHINE_HAS_IEEE)
		return 0;

	asm volatile(
		"	efpc	%1\n"
		"	sfpc	%2\n"
		"0:	sfpc	%1\n"
		"	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc), "=d" (orig_fpc)
		: "d" (fpc), "0" (-EINVAL));
	return rc;
}
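
/* Store the current floating-point control word to *fpc. */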
static inline void save_fp_ctl(u32 *fpc)
{
	if (!MACHINE_HAS_IEEE)
		return;

	asm volatile(
		"	stfpc	%0\n"
		: "+Q" (*fpc));
}
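
/*
 * Load *fpc into the floating-point control register.  Returns -EINVAL
 * if the value is rejected (caught via the exception table), 0 otherwise.
 */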
static inline int restore_fp_ctl(u32 *fpc)
{
	int rc;

	if (!MACHINE_HAS_IEEE)
		return 0;

	asm volatile(
		"	lfpc	%1\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
	return rc;
}
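
/*
 * Store the floating-point registers to the fprs array.  Machines
 * without the IEEE facility only provide the four registers f0, f2,
 * f4 and f6; the remaining twelve are saved only if MACHINE_HAS_IEEE.
 */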
static inline void save_fp_regs(freg_t *fprs)
{
	asm volatile("std 0,%0" : "=Q" (fprs[0]));
	asm volatile("std 2,%0" : "=Q" (fprs[2]));
	asm volatile("std 4,%0" : "=Q" (fprs[4]));
	asm volatile("std 6,%0" : "=Q" (fprs[6]));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile("std 1,%0" : "=Q" (fprs[1]));
	asm volatile("std 3,%0" : "=Q" (fprs[3]));
	asm volatile("std 5,%0" : "=Q" (fprs[5]));
	asm volatile("std 7,%0" : "=Q" (fprs[7]));
	asm volatile("std 8,%0" : "=Q" (fprs[8]));
	asm volatile("std 9,%0" : "=Q" (fprs[9]));
	asm volatile("std 10,%0" : "=Q" (fprs[10]));
	asm volatile("std 11,%0" : "=Q" (fprs[11]));
	asm volatile("std 12,%0" : "=Q" (fprs[12]));
	asm volatile("std 13,%0" : "=Q" (fprs[13]));
	asm volatile("std 14,%0" : "=Q" (fprs[14]));
	asm volatile("std 15,%0" : "=Q" (fprs[15]));
}
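
/* Load the floating-point registers from the fprs array (see save_fp_regs). */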
static inline void restore_fp_regs(freg_t *fprs)
{
	asm volatile("ld 0,%0" : : "Q" (fprs[0]));
	asm volatile("ld 2,%0" : : "Q" (fprs[2]));
	asm volatile("ld 4,%0" : : "Q" (fprs[4]));
	asm volatile("ld 6,%0" : : "Q" (fprs[6]));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile("ld 1,%0" : : "Q" (fprs[1]));
	asm volatile("ld 3,%0" : : "Q" (fprs[3]));
	asm volatile("ld 5,%0" : : "Q" (fprs[5]));
	asm volatile("ld 7,%0" : : "Q" (fprs[7]));
	asm volatile("ld 8,%0" : : "Q" (fprs[8]));
	asm volatile("ld 9,%0" : : "Q" (fprs[9]));
	asm volatile("ld 10,%0" : : "Q" (fprs[10]));
	asm volatile("ld 11,%0" : : "Q" (fprs[11]));
	asm volatile("ld 12,%0" : : "Q" (fprs[12]));
	asm volatile("ld 13,%0" : : "Q" (fprs[13]));
	asm volatile("ld 14,%0" : : "Q" (fprs[14]));
	asm volatile("ld 15,%0" : : "Q" (fprs[15]));
}
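
/*
 * Store all 32 vector registers to the vxrs array.  The instructions
 * are emitted as .word sequences (mnemonics in the comments) so the
 * header still assembles with binutils versions that do not know the
 * vector instructions.
 */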
static inline void save_vx_regs(__vector128 *vxrs)
{
	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;

	asm volatile(
		"	la	1,%0\n"
		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
		: "=Q" (*(addrtype *) vxrs) : : "1");
}
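
/*
 * Save the vector registers with interrupts disabled and with the
 * floating-point/vector enablement bits in control register 0
 * temporarily set, so the vector store works even if the facility is
 * currently disabled for this CPU.  CR0 and the interrupt state are
 * restored afterwards.
 */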
static inline void save_vx_regs_safe(__vector128 *vxrs)
{
	unsigned long cr0, flags;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_set_bit(0, 17);
	__ctl_set_bit(0, 18);
	save_vx_regs(vxrs);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
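
/* Load all 32 vector registers from the vxrs array (see save_vx_regs). */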
static inline void restore_vx_regs(__vector128 *vxrs)
{
	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;

	asm volatile(
		"	la	1,%0\n"
		"	.word	0xe70f,0x1000,0x0036\n"	/* vlm 0,15,0(1) */
		"	.word	0xe70f,0x1100,0x0c36\n"	/* vlm 16,31,256(1) */
		: : "Q" (*(addrtype *) vxrs) : "1");
}
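
/*
 * Save either the vector registers or the classic floating-point
 * registers, depending on whether the task uses the vector extension
 * (thread.vxrs exists on 64-bit only).
 */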
static inline void save_fp_vx_regs(struct task_struct *task)
{
#ifdef CONFIG_64BIT
	if (task->thread.vxrs)
		save_vx_regs(task->thread.vxrs);
	else
#endif
		save_fp_regs(task->thread.fp_regs.fprs);
}
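
/* Restore counterpart of save_fp_vx_regs. */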
static inline void restore_fp_vx_regs(struct task_struct *task)
{
#ifdef CONFIG_64BIT
	if (task->thread.vxrs)
		restore_vx_regs(task->thread.vxrs);
	else
#endif
		restore_fp_regs(task->thread.fp_regs.fprs);
}
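
/* Store all 16 access registers to the acrs array. */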
static inline void save_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}
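
/* Load all 16 access registers from the acrs array. */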
static inline void restore_access_regs(unsigned int *acrs)
{
	typedef struct { int _[NUM_ACRS]; } acrstype;

	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}
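
/*
 * Context switch: save the floating-point/vector, access register and
 * runtime instrumentation state of @prev and load the corresponding
 * state of @next before the low-level task switch in __switch_to.
 * Kernel threads (mm == NULL) carry no such user state and are skipped.
 */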
#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_ctl(&prev->thread.fp_regs.fpc);			\
		save_fp_vx_regs(prev);					\
		save_access_regs(&prev->thread.acrs[0]);		\
		save_ri_cb(prev->thread.ri_cb);				\
	}								\
	if (next->mm) {							\
		update_cr_regs(next);					\
		restore_fp_ctl(&next->thread.fp_regs.fpc);		\
		restore_fp_vx_regs(next);				\
		restore_access_regs(&next->thread.acrs[0]);		\
		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

#endif /* __ASM_SWITCH_TO_H */