
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
        union {
                struct {
                        u32 l;
                        u32 h;
                };
                u64 q;
        };
};
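
/*
 * Usage sketch for struct msr (illustrative values only): x86 is
 * little-endian, so 'l' aliases the low 32 bits of 'q' and 'h' the high
 * 32 bits, and callers may fill either view of the union:
 *
 *	struct msr m;
 *
 *	m.q = 0x123456789abcdef0ULL;
 *	// now m.l == 0x9abcdef0 and m.h == 0x12345678
 */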

struct msr_info {
        u32 msr_no;
        struct msr reg;
        struct msr *msrs;
        int err;
};

struct msr_regs_info {
        u32 *regs;
        int err;
};

struct saved_msr {
        bool valid;
        struct msr_info info;
};

struct saved_msrs {
        unsigned int num;
        struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)    unsigned long low, high
#define EAX_EDX_VAL(val, low, high)     ((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)     "=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)    unsigned long long val
#define EAX_EDX_VAL(val, low, high)     (val)
#define EAX_EDX_RET(val, low, high)     "=A" (val)
#endif
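
/*
 * Sketch of how the helpers above combine in a typical edx:eax read
 * (illustrative only; this mirrors the pattern used by rdtsc() below):
 *
 *	DECLARE_ARGS(val, low, high);		// x86_64: two ulongs; i386: one u64
 *	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
 *	return EAX_EDX_VAL(val, low, high);	// x86_64: low | high << 32; i386: val
 */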

#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("1: rdmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
                     : EAX_EDX_RET(val, low, high) : "c" (msr));
        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
        return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
                                                      int *err)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("2: rdmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err]\n\t"
                     "xorl %%eax, %%eax\n\t"
                     "xorl %%edx, %%edx\n\t"
                     "jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
                     : "c" (msr), [fault] "i" (-EIO));
        if (msr_tracepoint_active(__tracepoint_read_msr))
                do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
        return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline void native_write_msr(unsigned int msr,
                                            unsigned low, unsigned high)
{
        asm volatile("1: wrmsr\n"
                     "2:\n"
                     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
                     : : "c" (msr), "a"(low), "d" (high) : "memory");
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
                                                unsigned low, unsigned high)
{
        int err;

        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                     "1:\n\t"
                     ".section .fixup,\"ax\"\n\t"
                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                     ".previous\n\t"
                     _ASM_EXTABLE(2b, 3b)
                     : [err] "=a" (err)
                     : "c" (msr), "0" (low), "d" (high),
                       [fault] "i" (-EIO)
                     : "memory");
        if (msr_tracepoint_active(__tracepoint_write_msr))
                do_trace_write_msr(msr, ((u64)high << 32 | low), err);
        return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer. The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect. The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

        return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter. It should
 * be impossible to observe non-monotonic rdtsc() behavior across
 * multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
        /*
         * The RDTSC instruction is not ordered relative to memory
         * access. The Intel SDM and the AMD APM are both vague on this
         * point, but empirically an RDTSC instruction can be
         * speculatively executed before prior loads. An RDTSC
         * immediately after an appropriate barrier appears to be
         * ordered as a normal load, that is, it provides the same
         * ordering guarantees as reading from a global memory location
         * that some other imaginary CPU is updating continuously with a
         * time stamp.
         */
        alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
                      "lfence", X86_FEATURE_LFENCE_RDTSC);
        return rdtsc();
}
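
/*
 * Illustrative timing sketch (hypothetical caller, not part of this API):
 * because rdtsc() may execute speculatively, a simple cycle measurement
 * normally uses the ordered variant on both sides:
 *
 *	u64 t0, t1;
 *
 *	t0 = rdtsc_ordered();
 *	do_something();			// hypothetical work being timed
 *	t1 = rdtsc_ordered();
 *	// t1 - t0 is the elapsed TSC tick count on this CPU
 */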

/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
        DECLARE_ARGS(val, low, high);

        asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
        if (msr_tracepoint_active(__tracepoint_rdpmc))
                do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
        return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only)
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection), this allows gcc to optimize better
 */
#define rdmsr(msr, low, high)					\
do {								\
        u64 __val = native_read_msr((msr));			\
        (void)((low) = (u32)__val);				\
        (void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
        native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
        ((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
        native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
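
/*
 * Usage sketch for the accessors above (illustrative only; MSR_IA32_TSC
 * is simply a constant this header already references):
 *
 *	u32 lo, hi;
 *	u64 val;
 *
 *	rdmsr(MSR_IA32_TSC, lo, hi);	// 'lo'/'hi' are plain lvalues, no '&'
 *	rdmsrl(MSR_IA32_TSC, val);	// full 64-bit value in one variable
 *	wrmsrl(MSR_IA32_TSC, val);	// split back into edx:eax internally
 */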

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
        return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
        int __err;						\
        u64 __val = native_read_msr_safe((msr), &__err);	\
        (*low) = (u32)__val;					\
        (*high) = (u32)(__val >> 32);				\
        __err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = native_read_msr_safe(msr, &err);
        return err;
}
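
/*
 * Error-handling sketch (hypothetical caller): the _safe variants report
 * a faulting access by returning -EIO from the fixup path, so a caller
 * probing a possibly-absent MSR can check the return value, e.g.:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(msr, &val))	// non-zero means the read faulted
 *		return -ENODEV;		// hypothetical fallback path
 */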

#define rdpmc(counter, low, high)			\
do {							\
        u64 _l = native_read_pmc((counter));		\
        (low)  = (u32)_l;				\
        (high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
        return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
        rdmsr(msr_no, *l, *h);
        return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        wrmsr(msr_no, l, h);
        return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        rdmsrl(msr_no, *q);
        return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        wrmsrl(msr_no, q);
        return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                 struct msr *msrs)
{
        wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                    u32 *l, u32 *h)
{
        return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
        return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
        return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
        return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
        return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
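
/*
 * Cross-CPU usage sketch (illustrative; the CPU number and 'msr' are
 * placeholders): the *_on_cpu() helpers run the access on the named CPU
 * via a cross-CPU call when CONFIG_SMP is set, and fall back to the
 * local accessors above on UP builds:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(1, msr, &val))	// read 'msr' on CPU 1
 *		pr_debug("MSR not readable\n");	// hypothetical handling
 */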
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */