hyperv.c 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307
  1. /*
  2. * KVM Microsoft Hyper-V emulation
  3. *
  4. * derived from arch/x86/kvm/x86.c
  5. *
  6. * Copyright (C) 2006 Qumranet, Inc.
  7. * Copyright (C) 2008 Qumranet, Inc.
  8. * Copyright IBM Corporation, 2008
  9. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  10. * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
  11. *
  12. * Authors:
  13. * Avi Kivity <avi@qumranet.com>
  14. * Yaniv Kamay <yaniv@qumranet.com>
  15. * Amit Shah <amit.shah@qumranet.com>
  16. * Ben-Ami Yassour <benami@il.ibm.com>
  17. * Andrey Smetanin <asmetanin@virtuozzo.com>
  18. *
  19. * This work is licensed under the terms of the GNU GPL, version 2. See
  20. * the COPYING file in the top-level directory.
  21. *
  22. */
  23. #include "x86.h"
  24. #include "lapic.h"
  25. #include "hyperv.h"
  26. #include <linux/kvm_host.h>
  27. #include <trace/events/kvm.h>
  28. #include "trace.h"
  29. static bool kvm_hv_msr_partition_wide(u32 msr)
  30. {
  31. bool r = false;
  32. switch (msr) {
  33. case HV_X64_MSR_GUEST_OS_ID:
  34. case HV_X64_MSR_HYPERCALL:
  35. case HV_X64_MSR_REFERENCE_TSC:
  36. case HV_X64_MSR_TIME_REF_COUNT:
  37. r = true;
  38. break;
  39. }
  40. return r;
  41. }
  42. static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  43. {
  44. struct kvm *kvm = vcpu->kvm;
  45. struct kvm_hv *hv = &kvm->arch.hyperv;
  46. switch (msr) {
  47. case HV_X64_MSR_GUEST_OS_ID:
  48. hv->hv_guest_os_id = data;
  49. /* setting guest os id to zero disables hypercall page */
  50. if (!hv->hv_guest_os_id)
  51. hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
  52. break;
  53. case HV_X64_MSR_HYPERCALL: {
  54. u64 gfn;
  55. unsigned long addr;
  56. u8 instructions[4];
  57. /* if guest os id is not set hypercall should remain disabled */
  58. if (!hv->hv_guest_os_id)
  59. break;
  60. if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
  61. hv->hv_hypercall = data;
  62. break;
  63. }
  64. gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
  65. addr = gfn_to_hva(kvm, gfn);
  66. if (kvm_is_error_hva(addr))
  67. return 1;
  68. kvm_x86_ops->patch_hypercall(vcpu, instructions);
  69. ((unsigned char *)instructions)[3] = 0xc3; /* ret */
  70. if (__copy_to_user((void __user *)addr, instructions, 4))
  71. return 1;
  72. hv->hv_hypercall = data;
  73. mark_page_dirty(kvm, gfn);
  74. break;
  75. }
  76. case HV_X64_MSR_REFERENCE_TSC: {
  77. u64 gfn;
  78. HV_REFERENCE_TSC_PAGE tsc_ref;
  79. memset(&tsc_ref, 0, sizeof(tsc_ref));
  80. hv->hv_tsc_page = data;
  81. if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
  82. break;
  83. gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
  84. if (kvm_write_guest(
  85. kvm,
  86. gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
  87. &tsc_ref, sizeof(tsc_ref)))
  88. return 1;
  89. mark_page_dirty(kvm, gfn);
  90. break;
  91. }
  92. default:
  93. vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
  94. msr, data);
  95. return 1;
  96. }
  97. return 0;
  98. }
  99. static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  100. {
  101. struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
  102. switch (msr) {
  103. case HV_X64_MSR_APIC_ASSIST_PAGE: {
  104. u64 gfn;
  105. unsigned long addr;
  106. if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
  107. hv->hv_vapic = data;
  108. if (kvm_lapic_enable_pv_eoi(vcpu, 0))
  109. return 1;
  110. break;
  111. }
  112. gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
  113. addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
  114. if (kvm_is_error_hva(addr))
  115. return 1;
  116. if (__clear_user((void __user *)addr, PAGE_SIZE))
  117. return 1;
  118. hv->hv_vapic = data;
  119. kvm_vcpu_mark_page_dirty(vcpu, gfn);
  120. if (kvm_lapic_enable_pv_eoi(vcpu,
  121. gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
  122. return 1;
  123. break;
  124. }
  125. case HV_X64_MSR_EOI:
  126. return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
  127. case HV_X64_MSR_ICR:
  128. return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
  129. case HV_X64_MSR_TPR:
  130. return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
  131. default:
  132. vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
  133. msr, data);
  134. return 1;
  135. }
  136. return 0;
  137. }
  138. static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  139. {
  140. u64 data = 0;
  141. struct kvm *kvm = vcpu->kvm;
  142. struct kvm_hv *hv = &kvm->arch.hyperv;
  143. switch (msr) {
  144. case HV_X64_MSR_GUEST_OS_ID:
  145. data = hv->hv_guest_os_id;
  146. break;
  147. case HV_X64_MSR_HYPERCALL:
  148. data = hv->hv_hypercall;
  149. break;
  150. case HV_X64_MSR_TIME_REF_COUNT: {
  151. data =
  152. div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
  153. break;
  154. }
  155. case HV_X64_MSR_REFERENCE_TSC:
  156. data = hv->hv_tsc_page;
  157. break;
  158. default:
  159. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  160. return 1;
  161. }
  162. *pdata = data;
  163. return 0;
  164. }
  165. static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  166. {
  167. u64 data = 0;
  168. struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
  169. switch (msr) {
  170. case HV_X64_MSR_VP_INDEX: {
  171. int r;
  172. struct kvm_vcpu *v;
  173. kvm_for_each_vcpu(r, v, vcpu->kvm) {
  174. if (v == vcpu) {
  175. data = r;
  176. break;
  177. }
  178. }
  179. break;
  180. }
  181. case HV_X64_MSR_EOI:
  182. return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
  183. case HV_X64_MSR_ICR:
  184. return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
  185. case HV_X64_MSR_TPR:
  186. return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
  187. case HV_X64_MSR_APIC_ASSIST_PAGE:
  188. data = hv->hv_vapic;
  189. break;
  190. default:
  191. vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
  192. return 1;
  193. }
  194. *pdata = data;
  195. return 0;
  196. }
  197. int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  198. {
  199. if (kvm_hv_msr_partition_wide(msr)) {
  200. int r;
  201. mutex_lock(&vcpu->kvm->lock);
  202. r = kvm_hv_set_msr_pw(vcpu, msr, data);
  203. mutex_unlock(&vcpu->kvm->lock);
  204. return r;
  205. } else
  206. return kvm_hv_set_msr(vcpu, msr, data);
  207. }
  208. int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  209. {
  210. if (kvm_hv_msr_partition_wide(msr)) {
  211. int r;
  212. mutex_lock(&vcpu->kvm->lock);
  213. r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
  214. mutex_unlock(&vcpu->kvm->lock);
  215. return r;
  216. } else
  217. return kvm_hv_get_msr(vcpu, msr, pdata);
  218. }
  219. bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  220. {
  221. return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
  222. }
/*
 * Top-level Hyper-V hypercall handler, reached when the guest executes
 * the instruction patched into the hypercall page.  Decodes the hypercall
 * input value from guest registers, dispatches on the call code, and
 * writes the result value back into guest registers.
 *
 * Returns 1 to resume the guest normally, 0 when an exception was queued
 * instead of handling the call.
 */
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * hypercall generates UD from non zero cpl and real mode
	 * per HYPER-V spec
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		/* 32-bit calling convention: 64-bit values span register pairs */
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		/* 64-bit calling convention: rcx = input value, rdx/r8 = GPAs */
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	/* unpack the fields of the hypercall input value */
	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		/* guest reports a long spinlock wait; yield to other vcpus */
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	/* result value: status code plus the count of completed reps */
	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		/* split the 64-bit result across edx:eax for 32-bit guests */
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}
	return 1;
}