/*
 * xstate.h (6.2 KB) — x86 extended processor state (XSAVE) definitions.
 */
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

/* CPUID leaf that enumerates XSAVE features and save-area sizes/offsets. */
#define XSTATE_CPUID 0x0000000d

/* Individual xstate component bits, matching the XCR0 register layout. */
#define XSTATE_FP 0x1			/* x87 FPU state */
#define XSTATE_SSE 0x2			/* SSE (XMM registers) */
#define XSTATE_YMM 0x4			/* AVX (upper halves of YMM) */
#define XSTATE_BNDREGS 0x8		/* MPX bound registers */
#define XSTATE_BNDCSR 0x10		/* MPX bound config/status */
#define XSTATE_OPMASK 0x20		/* AVX-512 opmask (k0-k7) */
#define XSTATE_ZMM_Hi256 0x40		/* AVX-512 upper 256 bits of ZMM0-15 */
#define XSTATE_Hi16_ZMM 0x80		/* AVX-512 ZMM16-31 */
/* The highest xstate bit above (of XSTATE_Hi16_ZMM): */
#define XFEATURES_NR_MAX 8
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))

/* Legacy FXSAVE region size and XSAVE header geometry within the area. */
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
		| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Supported features which require eager state saving */
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
/* All currently supported features */
#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)

/*
 * On 64-bit the hand-assembled opcodes below need a REX.W prefix (0x48)
 * so the instructions operate on the 64-bit save format.
 */
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;			/* total size of the xsave area */
extern u64 xfeatures_mask;				/* features enabled in XCR0 */
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];	/* sw_reserved template */
extern struct xsave_struct init_xstate_ctx;		/* pristine init state */
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
/* Raw opcode encodings; assemblers of this era lack mnemonics for them. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"

/*
 * Shared fixup tail for the asm statements below.  The surrounding asm must
 * define label "1:" on the faulting instruction and "2:" at the resume
 * point; on a fault the exception table redirects to "3:", which stores -1
 * into the local variable 'err' and jumps back to "2:".  Expands into the
 * output-constraint position, declaring [err] as an output.
 */
#define xstate_fault ".section .fixup,\"ax\"\n" \
	"3: movl $-1,%[err]\n" \
	" jmp 2b\n" \
	".previous\n" \
	_ASM_EXTABLE(1b, 3b) \
	: [err] "=r" (err)
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 *
 * Saves all enabled extended state components into @fx, using XSAVES
 * (compacted format) when the boot CPU supports it, plain XSAVE otherwise.
 * Returns 0 on success, -1 if the instruction faulted.
 */
static inline int xsave_state_booting(struct xsave_struct *fx)
{
	u64 mask = -1;			/* request every component */
	u32 lmask = mask;		/* low 32 mask bits -> EAX */
	u32 hmask = mask >> 32;		/* high 32 mask bits -> EDX */
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	/* boot_cpu_has() instead of alternatives: patching isn't done yet. */
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 *
 * Restores the components selected by @mask from @fx, using XRSTORS
 * (compacted format) when supported, plain XRSTOR otherwise.  Returns 0 on
 * success, -1 if the instruction faulted.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;		/* low 32 mask bits -> EAX */
	u32 hmask = mask >> 32;		/* high 32 mask bits -> EDX */
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	/* boot_cpu_has() instead of alternatives: patching isn't done yet. */
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	return err;
}
/*
 * Save processor xstate to xsave area.
 *
 * Saves every enabled component into @fx, picking the best available
 * instruction via runtime alternatives.  Returns 0 on success, -1 if the
 * save instruction faulted.
 */
static inline int xsave_state(struct xsave_struct *fx)
{
	u64 mask = -1;			/* request every component */
	u32 lmask = mask;		/* low 32 mask bits -> EAX */
	u32 hmask = mask >> 32;		/* high 32 mask bits -> EDX */
	int err = 0;

	/* Boot path must use xsave_state_booting() instead. */
	WARN_ON(system_state == SYSTEM_BOOTING);

	/*
	 * If xsaves is enabled, xsaves replaces xsaveopt because
	 * it supports compact format and supervisor states in addition to
	 * modified optimization in xsaveopt.
	 *
	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
	 * because xsaveopt supports modified optimization which is not
	 * supported by xsave.
	 *
	 * If none of xsaves and xsaveopt is enabled, use xsave.
	 */
	/*
	 * NOTE(review): unlike the *_booting variants and xrstor_state(),
	 * this input list has no "m" (*fx) operand, and the fixup tail
	 * lives in a *separate* asm statement below that relies on the
	 * assembler resolving "1:"/"2:" across statements.  Fragile —
	 * confirm against how later kernels merged these into one asm.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	/* Resume label + exception fixup for the instruction emitted above. */
	asm volatile("2:\n\t"
		xstate_fault
		: "0" (0)	/* ties err's register to an initial 0 */
		: "memory");
	return err;
}
/*
 * Restore processor xstate from xsave area.
 *
 * Restores the components selected by @mask from @fx.  Returns 0 on
 * success, -1 if the restore instruction faulted.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;		/* low 32 mask bits -> EAX */
	u32 hmask = mask >> 32;		/* high 32 mask bits -> EDX */

	/*
	 * Use xrstors to restore context if it is enabled. xrstors supports
	 * compacted format of xsave area which is not supported by xrstor.
	 */
	/*
	 * NOTE(review): as in xsave_state(), the fixup tail is a separate
	 * asm statement that depends on cross-statement "1:"/"2:" label
	 * resolution — verify nothing can be scheduled between the two.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");
	/* Resume label + exception fixup for the instruction emitted above. */
	asm volatile("2:\n"
		xstate_fault
		: "0" (0)	/* ties err's register to an initial 0 */
		: "memory");
	return err;
}
  162. /*
  163. * Restore xstate context for new process during context switch.
  164. */
  165. static inline int fpu_xrstor_checking(struct xsave_struct *fx)
  166. {
  167. return xrstor_state(fx, -1);
  168. }
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use compacted format xsave area for
 * backward compatibility for old applications which don't understand
 * compacted format of xsave area.
 *
 * Returns 0 on success, -EFAULT if clearing the header faults, or -1 if
 * the XSAVE instruction itself faults.
 */
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	/* STAC/CLAC bracket the user-space access for SMAP. */
	__asm__ __volatile__(ASM_STAC "\n"
		"1:"XSAVE"\n"
		"2: " ASM_CLAC "\n"
		xstate_fault
		: "D" (buf), "a" (-1), "d" (-1), "0" (0)
		: "memory");
	return err;
}
/*
 * Restore xstate from user space xsave area.
 *
 * Restores the components selected by @mask from the user buffer @buf.
 * Returns 0 on success, -1 if the XRSTOR instruction faulted (bad address
 * or malformed xsave area).
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	/* Strip __user for the asm operand; access is guarded by STAC/CLAC. */
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;		/* low 32 mask bits -> EAX */
	u32 hmask = mask >> 32;		/* high 32 mask bits -> EDX */

	__asm__ __volatile__(ASM_STAC "\n"
		"1:"XRSTOR"\n"
		"2: " ASM_CLAC "\n"
		xstate_fault
		: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
		: "memory"); /* memory required? */
	return err;
}
/*
 * Locate the save area of component @xstate within @xsave (defined
 * out of line; presumably handles the compacted layout — confirm at the
 * definition site).
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate);

/* One-time setup of compacted-format component offsets (defined out of line). */
void setup_xstate_comp(void);

#endif