
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>

struct msr {
	union {
		struct {
			u32 l;
			u32 h;
		};
		u64 q;
	};
};

struct msr_info {
	u32 msr_no;
	struct msr reg;
	struct msr *msrs;
	int err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

static inline unsigned long long native_read_tscp(unsigned int *aux)
{
	unsigned long low, high;

	asm volatile(".byte 0x0f,0x01,0xf9"
		     : "=a" (low), "=d" (high), "=c" (*aux));
	return low | ((u64)high << 32);
}
/*
 * Both i386 and x86_64 return the 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif
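
/*
 * Illustrative sketch only, not part of the header: on a CONFIG_X86_64
 * build, a read sequence such as
 *
 *	DECLARE_ARGS(val, low, high);
 *	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
 *	return EAX_EDX_VAL(val, low, high);
 *
 * expands to two 64-bit temporaries bound to "=a" (low) and "=d" (high)
 * and combined as (low) | (high) << 32, while a 32-bit build returns the
 * whole value through the single "=A" (edx:eax) output instead.
 */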
#ifdef CONFIG_TRACEPOINTS
/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
#endif

static inline unsigned long long native_read_msr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						       int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), [fault] "i" (-EIO));
	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}
static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
notrace static inline int native_write_msr_safe(unsigned int msr,
						unsigned low, unsigned high)
{
	int err;

	asm volatile("2: wrmsr ; xor %[err],%[err]\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %[fault],%[err] ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high),
		       [fault] "i" (-EIO)
		     : "memory");
	if (msr_tracepoint_active(__tracepoint_write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}
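
/*
 * Illustrative sketch only: a caller that just wants a raw cycle stamp
 * and does not care how the read is ordered against nearby loads and
 * stores can simply do
 *
 *	u64 t = rdtsc();
 *
 * but, per the comment above, it should not compare stamps taken on
 * different CPUs; use rdtsc_ordered() below when ordering matters.
 */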
/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 */
	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
		      "lfence", X86_FEATURE_LFENCE_RDTSC);
	return rdtsc();
}
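
/*
 * Illustrative sketch, not part of this header: measuring the cycle
 * cost of a code region with the ordered variant, so that neither read
 * can be speculated across the region being timed:
 *
 *	u64 start, end, elapsed_cycles;
 *
 *	start = rdtsc_ordered();
 *	do_something();
 *	end = rdtsc_ordered();
 *	elapsed_cycles = end - start;
 *
 * do_something() and elapsed_cycles are placeholders for this example.
 */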
/* Deprecated, keep it for a cycle for easier merging: */
#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (msr_tracepoint_active(__tracepoint_rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the output parameters directly (without
 * using pointer indirection); this allows gcc to optimize better.
 */
#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned msr, u64 val)
{
	native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
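
/*
 * Illustrative sketch, not part of this header: reading and writing an
 * MSR with the non-checking helpers.  MSR_IA32_TSC_ADJUST comes from
 * msr-index.h; the local variables are placeholders:
 *
 *	u64 adj;
 *	u32 lo, hi;
 *
 *	rdmsrl(MSR_IA32_TSC_ADJUST, adj);	// full 64-bit value
 *	rdmsr(MSR_IA32_TSC_ADJUST, lo, hi);	// split into edx:eax halves
 *	wrmsrl(MSR_IA32_TSC_ADJUST, 0);		// write all 64 bits
 *
 * These variants fault (#GP) if the MSR does not exist on the running
 * CPU; prefer the *_safe() forms below when that is a possibility.
 */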
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}
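
/*
 * Illustrative sketch, not part of this header: probing an MSR that may
 * be missing on the running CPU.  The *_safe() helpers catch the #GP via
 * the exception table and return -EIO instead of oopsing:
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(MSR_AMD64_TSC_RATIO, &val))
 *		return;		// MSR not implemented here, just bail
 *
 * MSR_AMD64_TSC_RATIO is only an example constant from msr-index.h.
 */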
#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT */

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
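
/*
 * Illustrative sketch only: a checked 64-bit MSR write, skipped
 * gracefully (with -EIO reported) when the MSR is not implemented:
 *
 *	int err = wrmsrl_safe(MSR_AMD64_TSC_RATIO, 1ULL << 32);
 *	if (err)
 *		pr_debug("MSR write failed: %d\n", err);
 *
 * The MSR and value are placeholders, not a recommendation.
 */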
#define write_tsc(low, high)  wrmsr(MSR_IA32_TSC, (low), (high))

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else  /*  CONFIG_SMP  */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr *msrs)
{
	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif  /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */