/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
        { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
        { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
        { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
        { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
        { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
        { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
        { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
        { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
        { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
        { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
        { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
        { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
        { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
        { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
        {NULL}
};
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        for_each_possible_cpu(i) {
                vcpu->arch.guest_kernel_asid[i] = 0;
                vcpu->arch.guest_user_asid[i] = 0;
        }

        return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return !!(vcpu->arch.pending_exceptions);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

int kvm_arch_hardware_enable(void)
{
        return 0;
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
        unsigned long wired;

        /*
         * Add a wired entry to the TLB, it is used to map the commpage to
         * the Guest kernel
         */
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        mtc0_tlbw_hazard();
        kvm->arch.commpage_tlb = wired;

        kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
                  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
        struct kvm *kvm = (struct kvm *)arg;

        kvm_mips_init_tlbs(kvm);
        kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (atomic_inc_return(&kvm_mips_instance) == 1) {
                kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
                          __func__);
                on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
        }

        return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        /* Put the pages we reserved for the guest pmap */
        for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
                if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
                        kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
        }
        kfree(kvm->arch.guest_pmap);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_arch_vcpu_free(vcpu);
        }

        mutex_lock(&kvm->lock);

        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        mutex_unlock(&kvm->lock);
}

static void kvm_mips_uninit_tlbs(void *arg)
{
        /* Restore wired count */
        write_c0_wired(0);
        mtc0_tlbw_hazard();
        /* Clear out all the TLBs */
        kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_mips_free_vcpus(kvm);

        /* If this is the last instance, restore wired count */
        if (atomic_dec_return(&kvm_mips_instance) == 0) {
                kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
                          __func__);
                on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
        }
}
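
/*
 * Note: kvm_mips_init_tlbs() and kvm_mips_uninit_tlbs() are paired through
 * the kvm_mips_instance counter used above. Creating the first VM reserves
 * one wired TLB entry per CPU for the commpage mapping; destroying the last
 * VM resets the wired count and flushes the TLBs again.
 */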
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
                        unsigned long arg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        unsigned long npages = 0;
        int i;

        kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
                  __func__, kvm, mem->slot, mem->guest_phys_addr,
                  mem->memory_size, mem->userspace_addr);

        /* Setup Guest PMAP table */
        if (!kvm->arch.guest_pmap) {
                if (mem->slot == 0)
                        npages = mem->memory_size >> PAGE_SHIFT;

                if (npages) {
                        kvm->arch.guest_pmap_npages = npages;
                        kvm->arch.guest_pmap =
                                kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

                        if (!kvm->arch.guest_pmap) {
                                kvm_err("Failed to allocate guest PMAP");
                                return;
                        }

                        kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
                                  npages, kvm->arch.guest_pmap);

                        /* Now setup the page table */
                        for (i = 0; i < npages; i++)
                                kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
                }
        }
}
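
/*
 * guest_pmap, allocated above for memslot 0, is indexed by guest physical
 * frame number and holds the host pfn backing each guest page (it stays
 * KVM_INVALID_PAGE until a page is actually mapped by the TLB fault code).
 * kvm_mips_free_vcpus() releases those pfns again on VM teardown.
 */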
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err, size, offset;
        void *gebase;
        int i;

        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);

        if (err)
                goto out_free_cpu;

        kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

        /*
         * Allocate space for host mode exception handlers that handle
         * guest mode exits
         */
        if (cpu_has_veic || cpu_has_vint)
                size = 0x200 + VECTORSPACING * 64;
        else
                size = 0x4000;

        /* Save Linux EBASE */
        vcpu->arch.host_ebase = (void *)read_c0_ebase();

        gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

        if (!gebase) {
                err = -ENOMEM;
                goto out_free_cpu;
        }
        kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
                  ALIGN(size, PAGE_SIZE), gebase);

        /* Save new ebase */
        vcpu->arch.guest_ebase = gebase;

        /* Copy L1 Guest Exception handler to correct offset */

        /* TLB Refill, EXL = 0 */
        memcpy(gebase, mips32_exception,
               mips32_exceptionEnd - mips32_exception);

        /* General Exception Entry point */
        memcpy(gebase + 0x180, mips32_exception,
               mips32_exceptionEnd - mips32_exception);

        /* For vectored interrupts poke the exception code @ all offsets 0-7 */
        for (i = 0; i < 8; i++) {
                kvm_debug("L1 Vectored handler @ %p\n",
                          gebase + 0x200 + (i * VECTORSPACING));
                memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
                       mips32_exceptionEnd - mips32_exception);
        }

        /* General handler, relocate to unmapped space for sanity's sake */
        offset = 0x2000;
        kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
                  gebase + offset,
                  mips32_GuestExceptionEnd - mips32_GuestException);

        memcpy(gebase + offset, mips32_GuestException,
               mips32_GuestExceptionEnd - mips32_GuestException);

        /* Invalidate the icache for these ranges */
        local_flush_icache_range((unsigned long)gebase,
                                 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

        /*
         * Allocate comm page for guest kernel, a TLB will be reserved for
         * mapping GVA @ 0xFFFF8000 to this page
         */
        vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

        if (!vcpu->arch.kseg0_commpage) {
                err = -ENOMEM;
                goto out_free_gebase;
        }

        kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
        kvm_mips_commpage_init(vcpu);

        /* Init */
        vcpu->arch.last_sched_cpu = -1;

        /* Start off the timer */
        kvm_mips_init_count(vcpu);

        return vcpu;

out_free_gebase:
        kfree(gebase);

out_free_cpu:
        kfree(vcpu);

out:
        return ERR_PTR(err);
}
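
/*
 * Layout of the per-vcpu exception base (gebase) populated above:
 *
 *   0x0000                      TLB refill handler (EXL = 0)
 *   0x0180                      general exception entry point
 *   0x0200 + n * VECTORSPACING  vectored interrupt entries, n = 0..7
 *   0x2000                      relocated guest-exit handler
 *                               (mips32_GuestException)
 *
 * These are the host-mode handlers that service guest-mode exits, per the
 * comment above the size calculation.
 */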
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        kvm_vcpu_uninit(vcpu);

        kvm_mips_dump_stats(vcpu);

        kfree(vcpu->arch.guest_ebase);
        kfree(vcpu->arch.kseg0_commpage);
        kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r = 0;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvm_mips_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        }

        lose_fpu(1);

        local_irq_disable();
        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_guest_enter();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        r = __kvm_mips_vcpu_run(run, vcpu);

        /* Re-enable HTW before enabling interrupts */
        htw_start();

        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}
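
/*
 * Shape of a KVM_RUN pass, as implemented above: complete any pending MMIO
 * load from the previous exit, drop the host FPU state, deliver pending
 * guest interrupts with IRQs disabled, then enter the guest through
 * __kvm_mips_vcpu_run(). The exit path calls kvm_mips_handle_exit() (below),
 * whose return value decides whether the guest is resumed or control comes
 * back here and on to userspace.
 */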
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                             struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;
        struct kvm_vcpu *dvcpu = NULL;

        if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
                kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
                          (int)intr);

        if (irq->cpu == -1)
                dvcpu = vcpu;
        else
                dvcpu = vcpu->kvm->vcpus[irq->cpu];

        if (intr == 2 || intr == 3 || intr == 4) {
                kvm_mips_callbacks->queue_io_int(dvcpu, irq);
        } else if (intr == -2 || intr == -3 || intr == -4) {
                kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
        } else {
                kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
                        irq->cpu, irq->irq);
                return -EINVAL;
        }

        dvcpu->arch.wait = 0;

        if (waitqueue_active(&dvcpu->wq))
                wake_up_interruptible(&dvcpu->wq);

        return 0;
}
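
/*
 * KVM_INTERRUPT convention used above: a positive irq number (2, 3 or 4)
 * queues the corresponding guest I/O interrupt, the negated value dequeues
 * it again, and irq->cpu == -1 targets the vcpu the ioctl was issued on.
 * The target vcpu is also woken in case it is blocked waiting for an
 * interrupt.
 */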
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
        KVM_REG_MIPS_R0,
        KVM_REG_MIPS_R1,
        KVM_REG_MIPS_R2,
        KVM_REG_MIPS_R3,
        KVM_REG_MIPS_R4,
        KVM_REG_MIPS_R5,
        KVM_REG_MIPS_R6,
        KVM_REG_MIPS_R7,
        KVM_REG_MIPS_R8,
        KVM_REG_MIPS_R9,
        KVM_REG_MIPS_R10,
        KVM_REG_MIPS_R11,
        KVM_REG_MIPS_R12,
        KVM_REG_MIPS_R13,
        KVM_REG_MIPS_R14,
        KVM_REG_MIPS_R15,
        KVM_REG_MIPS_R16,
        KVM_REG_MIPS_R17,
        KVM_REG_MIPS_R18,
        KVM_REG_MIPS_R19,
        KVM_REG_MIPS_R20,
        KVM_REG_MIPS_R21,
        KVM_REG_MIPS_R22,
        KVM_REG_MIPS_R23,
        KVM_REG_MIPS_R24,
        KVM_REG_MIPS_R25,
        KVM_REG_MIPS_R26,
        KVM_REG_MIPS_R27,
        KVM_REG_MIPS_R28,
        KVM_REG_MIPS_R29,
        KVM_REG_MIPS_R30,
        KVM_REG_MIPS_R31,

        KVM_REG_MIPS_HI,
        KVM_REG_MIPS_LO,
        KVM_REG_MIPS_PC,

        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};
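
/*
 * kvm_mips_get_one_regs[] is the list of register ids this implementation
 * exposes: kvm_arch_vcpu_ioctl() copies it verbatim to userspace for
 * KVM_GET_REG_LIST, and kvm_mips_get_reg() below can read every id in it.
 */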
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret;
        s64 v;

        switch (reg->id) {
        case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
                v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
                break;
        case KVM_REG_MIPS_HI:
                v = (long)vcpu->arch.hi;
                break;
        case KVM_REG_MIPS_LO:
                v = (long)vcpu->arch.lo;
                break;
        case KVM_REG_MIPS_PC:
                v = (long)vcpu->arch.pc;
                break;
        case KVM_REG_MIPS_CP0_INDEX:
                v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        /* registers to be handled specially */
        case KVM_REG_MIPS_CP0_COUNT:
        case KVM_REG_MIPS_COUNT_CTL:
        case KVM_REG_MIPS_COUNT_RESUME:
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
                if (ret)
                        return ret;
                break;
        default:
                return -EINVAL;
        }
        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                return put_user(v, uaddr64);
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                u32 v32 = (u32)v;

                return put_user(v32, uaddr32);
        } else {
                return -EINVAL;
        }
}
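
/*
 * Illustrative userspace usage of the ONE_REG interface implemented by
 * kvm_mips_get_reg()/kvm_mips_set_reg(). This is a sketch only: error
 * handling and the code obtaining vcpu_fd are omitted, and it assumes the
 * register id constants from the uapi KVM headers:
 *
 *      struct kvm_one_reg reg;
 *      __u64 pc;
 *
 *      reg.id   = KVM_REG_MIPS_PC;
 *      reg.addr = (__u64)(unsigned long)&pc;
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);   /+ read the guest PC +/
 *
 * (The "/+ +/" above stands for a nested comment.) The size encoded in
 * reg.id (KVM_REG_SIZE_U32/U64) selects which put_user()/get_user() branch
 * is taken in the functions above and below.
 */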
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
                            const struct kvm_one_reg *reg)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u64 v;

        if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

                if (get_user(v, uaddr64) != 0)
                        return -EFAULT;
        } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
                s32 v32;

                if (get_user(v32, uaddr32) != 0)
                        return -EFAULT;
                v = (s64)v32;
        } else {
                return -EINVAL;
        }

        switch (reg->id) {
        case KVM_REG_MIPS_R0:
                /* Silently ignore requests to set $0 */
                break;
        case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
                vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
                break;
        case KVM_REG_MIPS_HI:
                vcpu->arch.hi = v;
                break;
        case KVM_REG_MIPS_LO:
                vcpu->arch.lo = v;
                break;
        case KVM_REG_MIPS_PC:
                vcpu->arch.pc = v;
                break;
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        /* registers to be handled specially */
        case KVM_REG_MIPS_CP0_COUNT:
        case KVM_REG_MIPS_CP0_COMPARE:
        case KVM_REG_MIPS_CP0_CAUSE:
        case KVM_REG_MIPS_COUNT_CTL:
        case KVM_REG_MIPS_COUNT_RESUME:
        case KVM_REG_MIPS_COUNT_HZ:
                return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
        default:
                return -EINVAL;
        }
        return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
                         unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                if (copy_from_user(&reg, argp, sizeof(reg)))
                        return -EFAULT;
                if (ioctl == KVM_SET_ONE_REG)
                        return kvm_mips_set_reg(vcpu, &reg);
                else
                        return kvm_mips_get_reg(vcpu, &reg);
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                u64 __user *reg_dest;
                struct kvm_reg_list reg_list;
                unsigned n;

                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        return -EFAULT;
                n = reg_list.n;
                reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        return -EFAULT;
                if (n < reg_list.n)
                        return -E2BIG;
                reg_dest = user_list->reg;
                if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
                                 sizeof(kvm_mips_get_one_regs)))
                        return -EFAULT;
                return 0;
        }
        case KVM_NMI:
                /* Treat the NMI as a CPU reset */
                r = kvm_mips_reset_vcpu(vcpu);
                break;
        case KVM_INTERRUPT:
                {
                        struct kvm_mips_interrupt irq;

                        r = -EFAULT;
                        if (copy_from_user(&irq, argp, sizeof(irq)))
                                goto out;

                        kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
                                  irq.irq);

                        r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                        break;
                }
        default:
                r = -ENOIOCTLCMD;
        }

out:
        return r;
}
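
/*
 * KVM_GET_REG_LIST, as handled above, follows the usual two-call pattern:
 * userspace passes in a struct kvm_reg_list with n set to the capacity of
 * its reg[] array; the kernel always writes back the real count, and only
 * fills reg[] when the caller's buffer was large enough, returning -E2BIG
 * otherwise.
 */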
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        unsigned long ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = &kvm->memslots->memslots[log->slot];

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
                         ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;

out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
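
/*
 * kvm_get_dirty_log() copies the slot's dirty bitmap out to userspace and
 * reports whether anything was dirty; the code above then clears the
 * in-kernel bitmap so the next KVM_GET_DIRTY_LOG call starts from a clean
 * slate.
 */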
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -ENOIOCTLCMD;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        if (kvm_mips_callbacks) {
                kvm_err("kvm: module already exists\n");
                return -EEXIST;
        }

        return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
        kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }
        return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
        int i;
        struct mips_coproc *cop0;

        if (!vcpu)
                return -1;

        kvm_debug("VCPU Register Dump:\n");
        kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
        kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
                          vcpu->arch.gprs[i],
                          vcpu->arch.gprs[i + 1],
                          vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
        }
        kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
        kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

        cop0 = vcpu->arch.cop0;
        kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
                  kvm_read_c0_guest_status(cop0),
                  kvm_read_c0_guest_cause(cop0));

        kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                vcpu->arch.gprs[i] = regs->gpr[i];
        vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
        vcpu->arch.hi = regs->hi;
        vcpu->arch.lo = regs->lo;
        vcpu->arch.pc = regs->pc;

        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                regs->gpr[i] = vcpu->arch.gprs[i];

        regs->hi = vcpu->arch.hi;
        regs->lo = vcpu->arch.lo;
        regs->pc = vcpu->arch.pc;

        return 0;
}
static void kvm_mips_comparecount_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvm_mips_callbacks->queue_timer_int(vcpu);

        vcpu->arch.wait = 0;
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
        kvm_mips_comparecount_func((unsigned long) vcpu);
        return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        kvm_mips_callbacks->vcpu_init(vcpu);
        hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL);
        vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
        return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
        uint32_t status = read_c0_status();

        if (cpu_has_dsp)
                status |= (ST0_MX);

        write_c0_status(status);
        ehb();
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        uint32_t cause = vcpu->arch.host_cp0_cause;
        uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
        uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        /* re-enable HTW before enabling interrupts */
        htw_start();

        /* Set a default exit reason */
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /*
         * Set the appropriate status bits based on host CPU features,
         * before we hit the scheduler
         */
        kvm_mips_set_c0_status();

        local_irq_enable();

        kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
                  cause, opc, run, vcpu);

        /*
         * Do a privilege check; if the guest is in user mode, most of these
         * exit conditions end up causing an exception to be delivered to the
         * guest kernel
         */
        er = kvm_mips_check_privilege(cause, opc, run, vcpu);
        if (er == EMULATE_PRIV_FAIL) {
                goto skip_emul;
        } else if (er == EMULATE_FAIL) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                goto skip_emul;
        }

        switch (exccode) {
        case T_INT:
                kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

                ++vcpu->stat.int_exits;
                trace_kvm_exit(vcpu, INT_EXITS);

                if (need_resched())
                        cond_resched();

                ret = RESUME_GUEST;
                break;

        case T_COP_UNUSABLE:
                kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

                ++vcpu->stat.cop_unusable_exits;
                trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
                ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
                /* XXXKYMA: Might need to return to user space */
                if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
                        ret = RESUME_HOST;
                break;

        case T_TLB_MOD:
                ++vcpu->stat.tlbmod_exits;
                trace_kvm_exit(vcpu, TLBMOD_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
                break;

        case T_TLB_ST_MISS:
                kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
                          badvaddr);

                ++vcpu->stat.tlbmiss_st_exits;
                trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
                break;

        case T_TLB_LD_MISS:
                kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);

                ++vcpu->stat.tlbmiss_ld_exits;
                trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
                break;

        case T_ADDR_ERR_ST:
                ++vcpu->stat.addrerr_st_exits;
                trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
                break;

        case T_ADDR_ERR_LD:
                ++vcpu->stat.addrerr_ld_exits;
                trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
                break;

        case T_SYSCALL:
                ++vcpu->stat.syscall_exits;
                trace_kvm_exit(vcpu, SYSCALL_EXITS);
                ret = kvm_mips_callbacks->handle_syscall(vcpu);
                break;

        case T_RES_INST:
                ++vcpu->stat.resvd_inst_exits;
                trace_kvm_exit(vcpu, RESVD_INST_EXITS);
                ret = kvm_mips_callbacks->handle_res_inst(vcpu);
                break;

        case T_BREAK:
                ++vcpu->stat.break_inst_exits;
                trace_kvm_exit(vcpu, BREAK_INST_EXITS);
                ret = kvm_mips_callbacks->handle_break(vcpu);
                break;

        default:
                kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
                        exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
                        kvm_read_c0_guest_status(vcpu->arch.cop0));
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;
        }

skip_emul:
        local_irq_disable();

        if (er == EMULATE_DONE && !(ret & RESUME_HOST))
                kvm_mips_deliver_interrupts(vcpu, cause);

        if (!(ret & RESUME_HOST)) {
                /* Only check for signals if not already exiting to userspace */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        ret = (-EINTR << 2) | RESUME_HOST;
                        ++vcpu->stat.signal_exits;
                        trace_kvm_exit(vcpu, SIGNAL_EXITS);
                }
        }

        /* Disable HTW before returning to guest or host */
        htw_stop();

        return ret;
}
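
/*
 * The RESUME_* value returned by kvm_mips_handle_exit() is consumed by the
 * low-level run loop: RESUME_GUEST re-enters the guest directly, while any
 * value with RESUME_FLAG_HOST set unwinds back through
 * kvm_arch_vcpu_ioctl_run() to userspace with run->exit_reason describing
 * why (for the signal case above, an error code is also packed into the
 * upper bits as (-EINTR << 2), matching the encoding documented at the top
 * of the function).
 */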
int __init kvm_mips_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

        if (ret)
                return ret;

        /*
         * On MIPS, kernel modules are executed from "mapped space", which
         * requires TLBs. The TLB handling code is statically linked with
         * the rest of the kernel (tlb.c) to avoid the possibility of
         * double faulting. The issue is that the TLB code references
         * routines that are part of the KVM module, which are only
         * available once the module is loaded.
         */
        kvm_mips_gfn_to_pfn = gfn_to_pfn;
        kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
        kvm_mips_is_error_pfn = is_error_pfn;

        pr_info("KVM/MIPS Initialized\n");
        return 0;
}

void __exit kvm_mips_exit(void)
{
        kvm_exit();

        kvm_mips_gfn_to_pfn = NULL;
        kvm_mips_release_pfn_clean = NULL;
        kvm_mips_is_error_pfn = NULL;

        pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);