hyperv.c

/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <trace/events/kvm.h>

#include "trace.h"
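
/*
 * Partition-wide MSRs are shared by every vCPU in the VM, so the common
 * entry points at the bottom of this file serialize accesses to them
 * under kvm->lock; all other Hyper-V MSRs are per-vCPU state.
 */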
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		r = true;
		break;
	}

	return r;
}
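
/*
 * HV_X64_MSR_CRASH_P0..P4 hold the five parameters a guest reports on a
 * crash; they are cached in hv->hv_crash_param[] so that user space can
 * read them back after the crash notification.
 */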
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}
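
/*
 * Only a host-initiated write may latch the NOTIFY bit into
 * hv_crash_ctl; a guest write with NOTIFY set is the crash report
 * itself, which is logged and forwarded to user space through
 * KVM_REQ_HV_CRASH.
 */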
static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}
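
/*
 * Partition-wide MSR writes. Enabling HV_X64_MSR_HYPERCALL installs the
 * hypercall page: the vendor-specific 3-byte VMCALL/VMMCALL sequence
 * from ->patch_hypercall() followed by a RET (0xc3), so a guest can
 * simply CALL into the page, e.g. (sketch):
 *
 *	mov	rcx, control	; hypercall input value
 *	mov	rdx, ingpa
 *	mov	r8,  outgpa
 *	call	hypercall_page
 */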
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* Setting the guest OS ID to zero disables the hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* If the guest OS ID is not set, the hypercall stays disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(kvm,
				    gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				    &tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}
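
/*
 * Per-vCPU MSR writes. The APIC assist page backs Hyper-V PV EOI: it is
 * zeroed when enabled and hooked up to the local APIC's PV-EOI handling
 * via kvm_lapic_enable_pv_eoi().
 */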
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}
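
/*
 * Partition-wide MSR reads. The Hyper-V reference time counter is
 * defined in 100ns units, hence the kvmclock-adjusted kernel time
 * (in ns) is divided by 100.
 */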
static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}
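
/*
 * Per-vCPU MSR reads. HV_X64_MSR_VP_INDEX is synthesized from the
 * vCPU's position in the kvm_for_each_vcpu() iteration order rather
 * than from stored state.
 */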
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
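
/*
 * Common entry points, called from the generic x86 MSR code:
 * partition-wide MSRs are serialized under kvm->lock, per-vCPU MSRs are
 * handled locklessly on the vCPU thread.
 */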
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}
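
/*
 * The enable bit can only become set while a guest OS ID is registered
 * (enforced in kvm_hv_set_msr_pw()), so a true result here also implies
 * the guest has identified itself.
 */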
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
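
/*
 * Hypercall input value layout as decoded below (per the Hyper-V spec):
 * bits 15:0 call code, bit 16 fast-call flag, bits 43:32 rep count,
 * bits 59:48 rep start index. A 32-bit guest passes the input value in
 * EDX:EAX and the input/output GPAs in EBX:ECX and EDI:ESI; long mode
 * uses RCX, RDX and R8. The result is returned in RAX (EDX:EAX for
 * 32-bit guests).
 */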
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when issued from
	 * non-zero CPL or from real mode.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}