hyperv.c

/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
        return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
        if (sint_value & HV_SYNIC_SINT_MASKED)
                return -1;
        return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
                                       int vector)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        return true;
        }
        return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
                                      int vector)
{
        int i;
        u64 sint_value;

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                sint_value = synic_read_sint(synic, i);
                if (synic_get_sint_vector(sint_value) == vector &&
                    sint_value & HV_SYNIC_SINT_AUTO_EOI)
                        return true;
        }
        return false;
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
                          u64 data, bool host)
{
        int vector;

        vector = data & HV_SYNIC_SINT_VECTOR_MASK;
        if (vector < 16 && !host)
                return 1;
        /*
         * Guest may configure multiple SINTs to use the same vector, so
         * we maintain a bitmap of vectors handled by synic, and a
         * bitmap of vectors with auto-eoi behavior. The bitmaps are
         * updated here, and atomically queried on fast paths.
         */
        atomic64_set(&synic->sint[sint], data);

        if (synic_has_vector_connected(synic, vector))
                __set_bit(vector, synic->vec_bitmap);
        else
                __clear_bit(vector, synic->vec_bitmap);

        if (synic_has_vector_auto_eoi(synic, vector))
                __set_bit(vector, synic->auto_eoi_bitmap);
        else
                __clear_bit(vector, synic->auto_eoi_bitmap);

        /* Load SynIC vectors into EOI exit bitmap */
        kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
        return 0;
}
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu = NULL;
        int i;

        if (vpidx < KVM_MAX_VCPUS)
                vcpu = kvm_get_vcpu(kvm, vpidx);
        if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                return vcpu;
        kvm_for_each_vcpu(i, vcpu, kvm)
                if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
                        return vcpu;
        return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_hv_synic *synic;

        vcpu = get_vcpu_by_vpidx(kvm, vpidx);
        if (!vcpu)
                return NULL;
        synic = vcpu_to_synic(vcpu);
        return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
                                         u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *msg;
        struct hv_message_page *msg_page;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page)) {
                vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
                         gpa);
                return;
        }
        msg_page = kmap_atomic(page);
        msg = &msg_page->sint_message[sint];
        msg->header.message_flags.msg_pending = 0;

        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        int gsi, idx, stimers_pending;

        trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

        if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
                synic_clear_sint_msg_pending(synic, sint);

        /* Try to deliver pending Hyper-V SynIC timer messages */
        stimers_pending = 0;
        for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
                stimer = &hv_vcpu->stimer[idx];
                if (stimer->msg_pending &&
                    (stimer->config & HV_STIMER_ENABLE) &&
                    HV_STIMER_SINT(stimer->config) == sint) {
                        set_bit(stimer->index,
                                hv_vcpu->stimer_pending_bitmap);
                        stimers_pending++;
                }
        }
        if (stimers_pending)
                kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = atomic_read(&synic->sint_to_gsi[sint]);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

        hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
        hv_vcpu->exit.u.synic.msr = msr;
        hv_vcpu->exit.u.synic.control = synic->control;
        hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
        hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
        kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
                         u32 msr, u64 data, bool host)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        int ret;

        if (!synic->active)
                return 1;

        trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                synic->control = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SVERSION:
                if (!host) {
                        ret = 1;
                        break;
                }
                synic->version = data;
                break;
        case HV_X64_MSR_SIEFP:
                if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->evt_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_SIMP:
                if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
                    !synic->dont_zero_synic_pages)
                        if (kvm_clear_guest(vcpu->kvm,
                                            data & PAGE_MASK, PAGE_SIZE)) {
                                ret = 1;
                                break;
                        }
                synic->msg_page = data;
                if (!host)
                        synic_exit(synic, msr);
                break;
        case HV_X64_MSR_EOM: {
                int i;

                for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                        kvm_hv_notify_acked_sint(vcpu, i);
                break;
        }
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
        int ret;

        if (!synic->active)
                return 1;

        ret = 0;
        switch (msr) {
        case HV_X64_MSR_SCONTROL:
                *pdata = synic->control;
                break;
        case HV_X64_MSR_SVERSION:
                *pdata = synic->version;
                break;
        case HV_X64_MSR_SIEFP:
                *pdata = synic->evt_page;
                break;
        case HV_X64_MSR_SIMP:
                *pdata = synic->msg_page;
                break;
        case HV_X64_MSR_EOM:
                *pdata = 0;
                break;
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
                break;
        default:
                ret = 1;
                break;
        }
        return ret;
}
static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct kvm_lapic_irq irq;
        int ret, vector;

        if (sint >= ARRAY_SIZE(synic->sint))
                return -EINVAL;

        vector = synic_get_sint_vector(synic_read_sint(synic, sint));
        if (vector < 0)
                return -ENOENT;

        memset(&irq, 0, sizeof(irq));
        irq.shorthand = APIC_DEST_SELF;
        irq.dest_mode = APIC_DEST_PHYSICAL;
        irq.delivery_mode = APIC_DM_FIXED;
        irq.vector = vector;
        irq.level = 1;

        ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
        trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
        return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
        int i;

        trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

        for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
                if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
                        kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
        struct kvm_vcpu_hv_synic *synic;

        synic = synic_get(kvm, vpidx);
        if (!synic)
                return -EINVAL;

        if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
                return -EINVAL;

        atomic_set(&synic->sint_to_gsi[sint], gsi);
        return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        u32 gsi;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));

        for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        if (e->type == KVM_IRQ_ROUTING_HV_SINT)
                                kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
                                                    e->hv_sint.sint, gsi);
                }
        }
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
        int i;

        memset(synic, 0, sizeof(*synic));
        synic->version = HV_SYNIC_VERSION_1;
        for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
                atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
                atomic_set(&synic->sint_to_gsi[i], -1);
        }
}
static u64 get_time_ref_counter(struct kvm *kvm)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        struct kvm_vcpu *vcpu;
        u64 tsc;

        /*
         * The guest has not set up the TSC page or the clock isn't
         * stable, fall back to get_kvmclock_ns.
         */
        if (!hv->tsc_ref.tsc_sequence)
                return div_u64(get_kvmclock_ns(kvm), 100);

        vcpu = kvm_get_vcpu(kvm, 0);
        tsc = kvm_read_l1_tsc(vcpu, rdtsc());
        return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
                + hv->tsc_ref.tsc_offset;
}
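
/*
 * Illustrative note: this host-side read mirrors what a guest does with the
 * TSC page.  mul_u64_u64_shr(tsc, tsc_scale, 64) computes tsc * scale / 2^64,
 * which together with tsc_offset is the Hyper-V "nsec/100" formula derived in
 * the comment block before compute_tsc_page_parameters() further down; the
 * kvmclock fallback divides nanoseconds by 100 to get the same 100ns units.
 */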
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
                                bool vcpu_kick)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        set_bit(stimer->index,
                vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
        if (vcpu_kick)
                kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

        trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
                                    stimer->index);

        hrtimer_cancel(&stimer->timer);
        clear_bit(stimer->index,
                  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
        stimer->msg_pending = false;
        stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
        struct kvm_vcpu_hv_stimer *stimer;

        stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
        trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
                                     stimer->index);
        stimer_mark_pending(stimer, true);

        return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
        u64 time_now;
        ktime_t ktime_now;

        time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
        ktime_now = ktime_get();

        if (stimer->config & HV_STIMER_PERIODIC) {
                if (stimer->exp_time) {
                        if (time_now >= stimer->exp_time) {
                                u64 remainder;

                                div64_u64_rem(time_now - stimer->exp_time,
                                              stimer->count, &remainder);
                                stimer->exp_time =
                                        time_now + (stimer->count - remainder);
                        }
                } else
                        stimer->exp_time = time_now + stimer->count;

                trace_kvm_hv_stimer_start_periodic(
                                        stimer_to_vcpu(stimer)->vcpu_id,
                                        stimer->index,
                                        time_now, stimer->exp_time);

                hrtimer_start(&stimer->timer,
                              ktime_add_ns(ktime_now,
                                           100 * (stimer->exp_time - time_now)),
                              HRTIMER_MODE_ABS);
                return 0;
        }
        stimer->exp_time = stimer->count;
        if (time_now >= stimer->count) {
                /*
                 * Expire timer according to Hypervisor Top-Level Functional
                 * specification v4 (15.3.1):
                 * "If a one shot is enabled and the specified count is in
                 * the past, it will expire immediately."
                 */
                stimer_mark_pending(stimer, false);
                return 0;
        }

        trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
                                           stimer->index,
                                           time_now, stimer->count);

        hrtimer_start(&stimer->timer,
                      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
                      HRTIMER_MODE_ABS);
        return 0;
}
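
/*
 * Worked example: the time reference counter, stimer count and exp_time are
 * all in 100ns units, while hrtimer_start() expects nanoseconds -- hence the
 * "100 *" scaling above.  A one-shot timer armed with
 * count - time_now == 10,000,000 ticks therefore fires
 * 10,000,000 * 100 ns = 1 second from now.
 */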
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
                             bool host)
{
        trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, config, host);

        stimer_cleanup(stimer);
        if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
                config &= ~HV_STIMER_ENABLE;
        stimer->config = config;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
                            bool host)
{
        trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
                                      stimer->index, count, host);

        stimer_cleanup(stimer);
        stimer->count = count;
        if (stimer->count == 0)
                stimer->config &= ~HV_STIMER_ENABLE;
        else if (stimer->config & HV_STIMER_AUTOENABLE)
                stimer->config |= HV_STIMER_ENABLE;
        stimer_mark_pending(stimer, false);
        return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
        *pconfig = stimer->config;
        return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
        *pcount = stimer->count;
        return 0;
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
                             struct hv_message *src_msg)
{
        struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
        struct page *page;
        gpa_t gpa;
        struct hv_message *dst_msg;
        int r;
        struct hv_message_page *msg_page;

        if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
                return -ENOENT;

        gpa = synic->msg_page & PAGE_MASK;
        page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
        if (is_error_page(page))
                return -EFAULT;

        msg_page = kmap_atomic(page);
        dst_msg = &msg_page->sint_message[sint];
        if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
                         src_msg->header.message_type) != HVMSG_NONE) {
                dst_msg->header.message_flags.msg_pending = 1;
                r = -EAGAIN;
        } else {
                memcpy(&dst_msg->u.payload, &src_msg->u.payload,
                       src_msg->header.payload_size);
                dst_msg->header.message_type = src_msg->header.message_type;
                dst_msg->header.payload_size = src_msg->header.payload_size;
                r = synic_set_irq(synic, sint);
                if (r >= 1)
                        r = 0;
                else if (r == 0)
                        r = -EFAULT;
        }
        kunmap_atomic(msg_page);
        kvm_release_page_dirty(page);
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
        return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        payload->expiration_time = stimer->exp_time;
        payload->delivery_time = get_time_ref_counter(vcpu->kvm);
        return synic_deliver_msg(vcpu_to_synic(vcpu),
                                 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
        int r;

        stimer->msg_pending = true;
        r = stimer_send_msg(stimer);
        trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
                                       stimer->index, r);
        if (!r) {
                stimer->msg_pending = false;
                if (!(stimer->config & HV_STIMER_PERIODIC))
                        stimer->config &= ~HV_STIMER_ENABLE;
        }
}
void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        struct kvm_vcpu_hv_stimer *stimer;
        u64 time_now, exp_time;
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
                        stimer = &hv_vcpu->stimer[i];
                        if (stimer->config & HV_STIMER_ENABLE) {
                                exp_time = stimer->exp_time;

                                if (exp_time) {
                                        time_now =
                                                get_time_ref_counter(vcpu->kvm);
                                        if (time_now >= exp_time)
                                                stimer_expiration(stimer);
                                }

                                if ((stimer->config & HV_STIMER_ENABLE) &&
                                    stimer->count) {
                                        if (!stimer->msg_pending)
                                                stimer_start(stimer);
                                } else
                                        stimer_cleanup(stimer);
                        }
                }
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
        struct hv_message *msg = &stimer->msg;
        struct hv_timer_message_payload *payload =
                        (struct hv_timer_message_payload *)&msg->u.payload;

        memset(&msg->header, 0, sizeof(msg->header));
        msg->header.message_type = HVMSG_TIMER_EXPIRED;
        msg->header.payload_size = sizeof(*payload);

        payload->timer_index = stimer->index;
        payload->expiration_time = 0;
        payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
        memset(stimer, 0, sizeof(*stimer));
        stimer->index = timer_index;
        hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        stimer->timer.function = stimer_timer_callback;
        stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
        int i;

        synic_init(&hv_vcpu->synic);

        bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
        for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
                stimer_init(&hv_vcpu->stimer[i], i);
}

void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

        hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
        struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

        /*
         * Hyper-V SynIC auto EOI SINTs are not compatible with APICV,
         * so deactivate APICV.
         */
        kvm_vcpu_deactivate_apicv(vcpu);
        synic->active = true;
        synic->dont_zero_synic_pages = dont_zero_synic_pages;
        return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
        case HV_X64_MSR_CRASH_CTL:
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
        case HV_X64_MSR_RESET:
                r = true;
                break;
        }

        return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        *pdata = hv->hv_crash_param[index];
        return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        *pdata = hv->hv_crash_ctl;
        return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (host)
                hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

        if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
                vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
                           hv->hv_crash_param[0],
                           hv->hv_crash_param[1],
                           hv->hv_crash_param[2],
                           hv->hv_crash_param[3],
                           hv->hv_crash_param[4]);

                /* Send notification about crash to user space */
                kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
        }

        return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
                                     u32 index, u64 data)
{
        struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

        if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
                return -EINVAL;

        hv->hv_crash_param[index] = data;
        return 0;
}
/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
                                        HV_REFERENCE_TSC_PAGE *tsc_ref)
{
        u64 max_mul;

        if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
                return false;

        /*
         * check if scale would overflow, if so we use the time ref counter
         *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
         *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
         *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
         */
        max_mul = 100ull << (32 - hv_clock->tsc_shift);
        if (hv_clock->tsc_to_system_mul >= max_mul)
                return false;

        /*
         * Otherwise compute the scale and offset according to the formulas
         * derived above.
         */
        tsc_ref->tsc_scale =
                mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
                                hv_clock->tsc_to_system_mul,
                                100);

        tsc_ref->tsc_offset = hv_clock->system_time;
        do_div(tsc_ref->tsc_offset, 100);
        tsc_ref->tsc_offset -=
                mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
        return true;
}
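
/*
 * Worked example (illustrative numbers, assuming a 1 GHz guest TSC): kvmclock
 * could then use tsc_shift = 0 and tsc_to_system_mul = 2^32, so that
 * nsec = ticks * 2^32 * 2^(0-32) = ticks.  Plugging into the formula above:
 *
 *    scale = 2^32 * 2^(32+0) / 100 = 2^64 / 100
 *
 * and the Hyper-V reading becomes ticks * (2^64/100) / 2^64 = ticks / 100,
 * i.e. one reference-counter tick per 100ns, as required.  The overflow check
 * also passes, since tsc_to_system_mul = 2^32 < 100 * 2^(32-0) = max_mul.
 */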
void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
{
        struct kvm_hv *hv = &kvm->arch.hyperv;
        u32 tsc_seq;
        u64 gfn;

        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;

        mutex_lock(&kvm->arch.hyperv.hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;

        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
         * change in the master clock, do not bother with caching.
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
                goto out_unlock;

        /*
         * While we're computing and writing the parameters, force the
         * guest to use the time reference count MSR.
         */
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
                goto out_unlock;

        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
                goto out_unlock;

        /* Ensure sequence is zero before writing the rest of the struct. */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
                goto out_unlock;

        /*
         * Now switch to the TSC page mechanism by writing the sequence.
         */
        tsc_seq++;
        if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
                tsc_seq = 1;

        /* Write the struct entirely before the non-zero sequence. */
        smp_wmb();

        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
        mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                             bool host)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                hv->hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!hv->hv_guest_os_id)
                        hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!hv->hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        hv->hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (__copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                hv->hv_hypercall = data;
                mark_page_dirty(kvm, gfn);
                break;
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
                if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 data);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        case HV_X64_MSR_RESET:
                if (data == 1) {
                        vcpu_debug(vcpu, "hyper-v reset requested\n");
                        kvm_make_request(KVM_REQ_HV_RESET, vcpu);
                }
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }
        return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
        u64 utime, stime;

        task_cputime_adjusted(current, &utime, &stime);

        return div_u64(utime + stime, 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                if (!host)
                        return 1;
                hv->vp_index = (u32)data;
                break;
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                u64 gfn;
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        hv->hv_vapic = data;
                        if (kvm_lapic_enable_pv_eoi(vcpu, 0))
                                return 1;
                        break;
                }
                gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
                addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (__clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                hv->hv_vapic = data;
                kvm_vcpu_mark_page_dirty(vcpu, gfn);
                if (kvm_lapic_enable_pv_eoi(vcpu,
                                            gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
                        return 1;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        case HV_X64_MSR_VP_RUNTIME:
                if (!host)
                        return 1;
                hv->runtime_offset = data - current_task_runtime_100ns();
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                        data, host);
        }
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
                return 1;
        }

        return 0;
}
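
/*
 * Note on the "/2" above: the synthetic timer MSRs interleave config and
 * count registers (in the Hyper-V TLFS, HV_X64_MSR_STIMER0_CONFIG is
 * 0x400000B0, STIMER0_COUNT is 0x400000B1, STIMER1_CONFIG is 0x400000B2,
 * and so on), so the timer index is recovered as
 * (msr - HV_X64_MSR_STIMER0_CONFIG) / 2 or (msr - HV_X64_MSR_STIMER0_COUNT) / 2.
 */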
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = hv->hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = hv->hv_hypercall;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
                data = get_time_ref_counter(kvm);
                break;
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_get_crash_data(vcpu,
                                                 msr - HV_X64_MSR_CRASH_P0,
                                                 pdata);
        case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        case HV_X64_MSR_RESET:
                data = 0;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX:
                data = hv->vp_index;
                break;
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        case HV_X64_MSR_APIC_ASSIST_PAGE:
                data = hv->hv_vapic;
                break;
        case HV_X64_MSR_VP_RUNTIME:
                data = current_task_runtime_100ns() + hv->runtime_offset;
                break;
        case HV_X64_MSR_SCONTROL:
        case HV_X64_MSR_SVERSION:
        case HV_X64_MSR_SIEFP:
        case HV_X64_MSR_SIMP:
        case HV_X64_MSR_EOM:
        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
        case HV_X64_MSR_STIMER0_CONFIG:
        case HV_X64_MSR_STIMER1_CONFIG:
        case HV_X64_MSR_STIMER2_CONFIG:
        case HV_X64_MSR_STIMER3_CONFIG: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

                return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
                                         pdata);
        }
        case HV_X64_MSR_STIMER0_COUNT:
        case HV_X64_MSR_STIMER1_COUNT:
        case HV_X64_MSR_STIMER2_COUNT:
        case HV_X64_MSR_STIMER3_COUNT: {
                int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

                return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
                                        pdata);
        }
        case HV_X64_MSR_TSC_FREQUENCY:
                data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
                break;
        case HV_X64_MSR_APIC_FREQUENCY:
                data = APIC_BUS_FREQUENCY;
                break;
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;

                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}
static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
        bool longmode;

        longmode = is_64_bit_mode(vcpu);
        if (longmode)
                kvm_register_write(vcpu, VCPU_REGS_RAX, result);
        else {
                kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
                kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
        }
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
        return 1;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;

        /*
         * hypercall generates UD from non zero cpl and real mode
         * per HYPER-V spec
         */
        if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        longmode = is_64_bit_mode(vcpu);

        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
                ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
                outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
                        (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
        }
#ifdef CONFIG_X86_64
        else {
                param = kvm_register_read(vcpu, VCPU_REGS_RCX);
                ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
                outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
        }
#endif

        code = param & 0xffff;
        fast = (param >> 16) & 0x1;
        rep_cnt = (param >> 32) & 0xfff;
        rep_idx = (param >> 48) & 0xfff;

        trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

        /* Hypercall continuation is not supported yet */
        if (rep_cnt || rep_idx) {
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                goto set_result;
        }

        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
                kvm_vcpu_on_spin(vcpu, false);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
                /* don't bother userspace if it has no way to handle it */
                if (!vcpu_to_synic(vcpu)->active) {
                        res = HV_STATUS_INVALID_HYPERCALL_CODE;
                        break;
                }
                vcpu->run->exit_reason = KVM_EXIT_HYPERV;
                vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
                vcpu->run->hyperv.u.hcall.input = param;
                vcpu->run->hyperv.u.hcall.params[0] = ingpa;
                vcpu->run->hyperv.u.hcall.params[1] = outgpa;
                vcpu->arch.complete_userspace_io =
                                kvm_hv_hypercall_complete_userspace;

                return 0;
        default:
                res = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
        }

set_result:
        ret = res | (((u64)rep_done & 0xfff) << 32);
        kvm_hv_hypercall_set_result(vcpu, ret);
        return 1;
}
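
/*
 * Example of the hypercall input value layout decoded above: bits 0-15 hold
 * the call code, bit 16 the fast-call flag, bits 32-43 the rep count and
 * bits 48-59 the rep start index.  For instance, param = 0x0000000000010008
 * decodes to code = 0x0008 (HVCALL_NOTIFY_LONG_SPIN_WAIT, assuming its usual
 * TLFS value), fast = 1, rep_cnt = 0 and rep_idx = 0.
 */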