kvm_mips.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
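/*
 * Per-VCPU exit statistics exported through debugfs by generic KVM code:
 * each entry maps a human-readable name to a counter in struct
 * kvm_vcpu_stat via the VCPU_STAT() offset macro above.
 */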
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
	{ NULL }
};
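
/*
 * Reset a VCPU by clearing its cached guest kernel and user ASIDs on every
 * possible CPU, so that fresh ASIDs are allocated the next time the VCPU
 * runs on each of them.
 */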
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	int *r = (int *)rtn;
	*r = 0;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB; it is used to map the commpage to
	 * the guest kernel.
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}
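
/*
 * Tear down all VCPUs and release the host pages that were reserved in the
 * guest physical address map (guest_pmap). Called on VM destruction via
 * kvm_arch_destroy_vm() below.
 */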
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}
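
/*
 * On the first registration of memory slot 0, allocate the guest pmap: one
 * entry per guest page, initialized to KVM_INVALID_PAGE. The entries are
 * later filled with host PFNs as guest pages are mapped in (outside this
 * file), and released again in kvm_mips_free_vcpus() above.
 */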
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i, err = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP");
				err = -ENOMEM;
				goto out;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
out:
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
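
/*
 * Create and initialize a VCPU. The exception base (gebase) allocated here
 * is laid out as follows; the offsets are those used by the memcpy()s in
 * the body below:
 *   0x000   TLB refill handler (EXL = 0)
 *   0x180   general exception entry point
 *   0x200+  vectored interrupt handlers, VECTORSPACING bytes apart
 *   0x2000  KVM guest-exit handler, relocated into unmapped space
 */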
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
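
/*
 * Inject or clear a guest I/O interrupt. A positive irq->irq (2, 3 or 4)
 * queues the interrupt; the corresponding negative value dequeues it.
 * irq->cpu == -1 targets the calling VCPU, otherwise it indexes kvm->vcpus.
 *
 * Hypothetical userspace sketch (the fd name is an assumption, not part of
 * this file):
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// assert IRQ 2 on this VCPU
 */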
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
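
/*
 * Registers exposed through the ONE_REG interface. This table is what
 * KVM_GET_REG_LIST copies out to userspace in kvm_arch_vcpu_ioctl() below.
 */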
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
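
/*
 * Read one register on behalf of KVM_GET_ONE_REG: the value is fetched into
 * a 64-bit temporary, then stored to the user buffer as 32 or 64 bits
 * according to the size field encoded in reg->id.
 *
 * Hypothetical userspace sketch (the fd name is an assumption):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (uintptr_t)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */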
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}
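
/*
 * Write one register on behalf of KVM_SET_ONE_REG: the user value is read
 * as 32 or 64 bits according to reg->id (32-bit values are sign-extended),
 * then routed either to common state here or to the implementation
 * callbacks for the timer-related registers.
 */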
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}

	return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id,
				  __func__, irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
		       ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	int ret;

	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);

	return ret;
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
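
/*
 * Report which generic KVM capabilities this architecture implements;
 * anything not listed here is reported as unsupported (0).
 */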
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	printk("VCPU Register Dump:\n");
	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));

	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
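
/*
 * Timer expiry: queue a timer interrupt for the VCPU and wake it if it is
 * blocked on its wait queue, so the interrupt can be delivered. Invoked
 * from the hrtimer wakeup routine below.
 */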
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_fpu)
		status |= (ST0_CU1);

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/*
	 * Do a privilege check; if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the guest kernel.
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}

int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);