mips.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
  38. { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
  39. { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
  40. { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
  41. { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
  42. { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
  43. { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
  44. { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
  45. { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
  46. { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
  47. { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
  48. { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
  49. { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
  50. { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
  51. { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
  52. { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
  53. { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
  54. { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
  55. { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
  56. {NULL}
  57. };
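
/*
 * Zero the cached guest kernel/user ASIDs for every possible CPU; fresh
 * ASIDs (and hence fresh TLB mappings) are then allocated the next time
 * the VCPU runs there.
 */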
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}
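
/*
 * kvm_mips_instance counts live VMs: only the creation of the first one
 * sets up the host TLB parameters, and it does so once on each CPU.
 */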
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP\n");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}
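
/*
 * Dump a dynamically generated handler as assembler-style .word directives
 * (via pr_debug), so the generated code can be inspected when debugging.
 */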
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size;
	void *gebase, *p, *handler;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;
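
	/*
	 * Layout of the gebase area: TLB refill handler at +0x0, general
	 * exception vector at +0x180, vectored interrupt handlers from
	 * +0x200, and the common exit handler plus the vcpu_run trampoline
	 * from +0x2000.
	 */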

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB Refill, EXL = 0 */
	kvm_mips_build_exception(gebase, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
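
/*
 * Main VCPU run loop: complete any pending MMIO load, deliver pending
 * guest interrupts, then enter the guest through the dynamically built
 * vcpu_run trampoline with host interrupts and the hardware page table
 * walker disabled.
 */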
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	guest_enter_irqoff();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	trace_kvm_enter(vcpu);
	r = vcpu->arch.vcpu_run(run, vcpu);
	trace_kvm_out(vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	guest_exit_irqoff();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static u64 kvm_mips_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};
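
/*
 * Count the register indices reported by KVM_GET_REG_LIST: the base list,
 * plus for an FPU guest 32 single and 16 even-numbered double FP registers
 * (the 48 below), plus 16 odd doubles on a 64-bit FPU, plus 32 vector
 * registers for an MSA guest, any enabled KScratch registers, and whatever
 * the emulation backend reports.
 */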
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	for (i = 0; i < 6; ++i) {
		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
			continue;

		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
				 sizeof(kvm_mips_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
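
/*
 * Read the guest register selected by reg->id into v (or vs[] for 128-bit
 * MSA vectors), then copy it out to userspace according to the size field
 * encoded in the register id.
 */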
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			return -EINVAL;
		switch (idx) {
		case 2:
			v = (long)kvm_read_c0_guest_kscratch1(cop0);
			break;
		case 3:
			v = (long)kvm_read_c0_guest_kscratch2(cop0);
			break;
		case 4:
			v = (long)kvm_read_c0_guest_kscratch3(cop0);
			break;
		case 5:
			v = (long)kvm_read_c0_guest_kscratch4(cop0);
			break;
		case 6:
			v = (long)kvm_read_c0_guest_kscratch5(cop0);
			break;
		case 7:
			v = (long)kvm_read_c0_guest_kscratch6(cop0);
			break;
		}
		break;
	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			return -EINVAL;
		switch (idx) {
		case 2:
			kvm_write_c0_guest_kscratch1(cop0, v);
			break;
		case 3:
			kvm_write_c0_guest_kscratch2(cop0, v);
			break;
		case 4:
			kvm_write_c0_guest_kscratch3(cop0, v);
			break;
		case 5:
			kvm_write_c0_guest_kscratch4(cop0, v);
			break;
		case 6:
			kvm_write_c0_guest_kscratch5(cop0, v);
			break;
		case 7:
			kvm_write_c0_guest_kscratch6(cop0, v);
			break;
		}
		break;
	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
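
/*
 * Userspace opts in to FPU/MSA for a VCPU via KVM_ENABLE_CAP; the flags
 * and args fields of the capability are currently unused and must be zero.
 */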
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
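
/*
 * Timer expiry: queue a timer interrupt for the guest and wake the VCPU
 * if it is blocked in a guest WAIT.
 */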
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}
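
/* Set Status.MX so DSP state is usable, if the host CPU has DSP. */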
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);