/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;

/*
 * Generate the guest ID.
 */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
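
/*
 * Illustrative usage sketch, not part of this header: init code builds the
 * guest OS ID from the running kernel version and hands it to the hypervisor
 * via the guest OS ID MSR. d_info1/d_info2 are "distro info" fields that
 * Linux currently leaves at zero.
 *
 *	u64 guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 *
 *	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */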

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (i.e. set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
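
/*
 * Typical caller pattern, sketched for illustration only: a message loop
 * captures the type before handling (handling may free the slot), then
 * hands the captured type to vmbus_signal_eom().
 *
 *	struct hv_message *msg = ...;	// this CPU's SynIC message page slot
 *	u32 message_type = READ_ONCE(msg->header.message_type);
 *
 *	if (message_type != HVMSG_NONE) {
 *		...handle the message...
 *		vmbus_signal_eom(msg, message_type);
 *	}
 */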

#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
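
/*
 * Example of the accessor style, for illustration only: per-CPU init code
 * reads this CPU's VP index from the synthetic MSR and records it in the
 * hv_vp_index map declared further below.
 *
 *	u64 msr_vp_index;
 *
 *	hv_get_vp_index(msr_vp_index);
 *	hv_vp_index[smp_processor_id()] = msr_vp_index;
 */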

void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}

#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D" (output_address_hi), "S" (output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
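
/*
 * Hypothetical caller sketch (the names below are illustrative, not from
 * this header): input/output buffers are passed by physical address via
 * virt_to_phys() above, so they must not span non-contiguous pages;
 * callers typically use pre-allocated per-CPU pages with interrupts
 * disabled.
 *
 *	struct hv_input_example *input = *this_cpu_ptr(example_pcpu_page);
 *
 *	input->field = ...;
 *	status = hv_do_hypercall(HVCALL_EXAMPLE, input, NULL);
 *	if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *		return status;
 */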

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__(CALL_NOSPEC
				     : "=A" (hv_status),
				       "+c" (input1_lo),
				       ASM_CALL_CONSTRAINT
				     : "A" (control),
				       "b" (input1_hi),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}
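
/*
 * Illustrative caller: the single operand travels entirely in registers,
 * so no buffer setup or virt_to_phys() translation is needed. For example,
 * a paravirt spin-wait path can notify the hypervisor like this
 * (HVCALL_NOTIFY_LONG_SPIN_WAIT comes from asm/hyperv-tlfs.h):
 *
 *	hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT,
 *			      current_spin_count);
 */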

/*
 * Rep hypercalls. Callers of this function are supposed to ensure that
 * rep_count and varhead_size comply with Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
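
/*
 * Illustrative caller, modelled on the Hyper-V TLB flush path: a variable
 * header plus a rep list of GVA pages, retried transparently by the loop
 * above if the hypervisor completes only some reps per invocation.
 *
 *	status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *				     gva_n, 0, flush, NULL);
 */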

/*
 * Hypervisor's notion of virtual processor ID is different from
 * Linux' notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
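
/*
 * Illustration, modelled on the Hyper-V TLB flush path: hypercalls that
 * target specific processors translate each Linux CPU to its VP number
 * when building the request's processor mask.
 *
 *	for_each_cpu(cpu, cpus) {
 *		vcpu = hv_cpu_number_to_vp_number(cpu);
 *		__set_bit(vcpu, (unsigned long *)&flush->processor_mask);
 *	}
 */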

void hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	u64 scale, offset;
	u32 sequence;

	/*
	 * The protocol for reading Hyper-V TSC page is specified in Hypervisor
	 * Top-Level Functional Specification ver. 3.0 and above. To get the
	 * reference time we must do the following:
	 * - READ ReferenceTscSequence
	 *   A special '0' value indicates the time source is unreliable and we
	 *   need to use something else. The currently published specification
	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
	 *   instead of '0' as the special value, see commit c35b82ef0294.
	 * - ReferenceTime =
	 *       ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
	 * - READ ReferenceTscSequence again. In case its value has changed
	 *   since our first reading we need to discard ReferenceTime and repeat
	 *   the whole sequence as the hypervisor was updating the page in
	 *   between.
	 */
	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;
		/*
		 * Make sure we read sequence before we read other values from
		 * TSC page.
		 */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = rdtsc_ordered();

		/*
		 * Make sure we read sequence after we read all other values
		 * from TSC page.
		 */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}
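
/*
 * Illustrative clocksource read callback built on the helpers here: the
 * result is in the hypervisor's 100ns reference-time units, with U64_MAX
 * signalling that the caller must fall back to another time source (e.g.
 * the HV_X64_MSR_TIME_REF_COUNT MSR).
 *
 *	static u64 read_hv_clock_tsc(struct clocksource *arg)
 *	{
 *		u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 *
 *		if (current_tick == U64_MAX)
 *			rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
 *
 *		return current_tick;
 *	}
 */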

static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 cur_tsc;

	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}

#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return NULL;
}

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	BUG();
	return U64_MAX;
}
#endif
#endif