kvm_mips_emul.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"

/*
 * Compute the return address and emulate the branch, if required.
 * This function should only be called when the faulting instruction
 * sits in a branch delay slot.
 */
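/*
 * Note on the arithmetic below: a taken MIPS branch targets
 * (branch PC + 4) + (sign-extended 16-bit offset << 2), while a
 * not-taken branch (or a nullified "likely" branch) resumes at
 * branch PC + 8, i.e. just past the delay slot.
 */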
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
	unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/*
	 * Read the instruction
	 */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/*
		 * jr and jalr are in r_format format.
		 */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32) {
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			} else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/*
		 * These are unconditional and in j_format.
		 */
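		/*
		 * The jump target below is the top four bits of the
		 * delay-slot address combined with the 26-bit instruction
		 * index shifted left by two (a "pseudo-absolute" jump
		 * within the current 256 MB region).
		 */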
	case jal_op:
		arch->gprs[31] = instpc + 8;
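		/* fall through */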
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/*
		 * These are conditional and in i_format.
		 */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/*
		 * And now the FPA/cp1 branch instructions.
		 */
	case cop1_op:
		printk("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	printk("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	printk("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
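
/*
 * Advance the guest PC past the instruction just emulated.  If the
 * exception was raised from a branch delay slot (Cause.BD set), the
 * next PC is the branch target computed by kvm_compute_return_epc();
 * otherwise it is simply PC + 4.
 */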
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/*
 * Every time the compare register is written to, we need to decide when
 * to fire the timer that represents timer ticks to the GUEST.
 */
enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	/* If COUNT is enabled */
	if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
		hrtimer_start(&vcpu->arch.comparecount_timer,
			      ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
	}

	return er;
}
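
/*
 * Emulate a guest ERET: leave exception level by clearing Status.EXL
 * and resuming at EPC, or leave error level by clearing Status.ERL and
 * resuming at ErrorEPC.  An ERET with neither bit set is a guest bug.
 */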
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
		       vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;

	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable again, then definitely go off to user
		 * space to check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return er;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now
 * so that we can catch this, if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_FAIL;
	uint32_t pc = vcpu->arch.pc;

	printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		printk
		    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		     pc, index, kvm_read_c0_guest_entryhi(cop0),
		     kvm_read_c0_guest_entrylo0(cop0),
		     kvm_read_c0_guest_entrylo1(cop0),
		     kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
#if 1
	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
	     kvm_read_c0_guest_pagemask(cop0));

	return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

#if 1
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
#else
	index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
#endif

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		printk("%s: illegal index: %d\n", __func__, index);
		return EMULATE_FAIL;
	}

	tlb = &vcpu->arch.guest_tlb[index];
#if 1
	/* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
#endif

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug
	    ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
	     pc, index, kvm_read_c0_guest_entryhi(cop0),
	     kvm_read_c0_guest_entrylo0(cop0),
	     kvm_read_c0_guest_entrylo1(cop0));

	return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	enum emulation_result er = EMULATE_DONE;
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return er;
}

enum emulation_result
kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL) {
		return er;
	}
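
	/*
	 * Decode the COP0 instruction fields (MIPS32 encoding):
	 * bits 25..21 hold the sub-opcode (MFC0/MTC0/...), bits 20..16
	 * the GPR rt, bits 15..11 the CP0 register rd, bits 2..0 the
	 * register select, and bit 25 (CO) marks the co-processor
	 * function encodings such as TLBWI and ERET.
	 */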
	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			printk("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
				vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				printk("Invalid TLB Index: %ld",
				       vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				printk("MTCz, cop0->reg[EBASE]: %#lx\n",
				       kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
				    vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
				    &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug
					    ("MTCz, change ASID from %#lx to %#lx\n",
					     kvm_read_c0_guest_entryhi(cop0) &
					     ASID_MASK,
					     vcpu->arch.gprs[rt] & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				/*
				 * Linux doesn't seem to write into COUNT;
				 * silently ignore the write rather than
				 * flagging an error.
				 */
				/* er = EMULATE_FAIL; */
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_write_c0_guest_compare(cop0,
							   vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				kvm_write_c0_guest_status(cop0,
							  vcpu->arch.gprs[rt]);

				/* Make sure that CU1 and NMI bits are never set */
				kvm_clear_c0_guest_status(cop0,
							  (ST0_CU1 | ST0_NMI));
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;
		case dmtc_op:
			printk
			    ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
			     vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
				    kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}
			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			printk
			    ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
			     vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/*
	 * Rollback PC only if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}
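
/*
 * MMIO stores are not performed here: the emulator fills in
 * kvm_run->mmio (physical address, length, data) and returns
 * EMULATE_DO_MMIO, and the store is completed in user space once the
 * exit reaches the VMM.
 */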
enum emulation_result
kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);
		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];
		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];
		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);
		break;

	default:
		printk("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/*
	 * Rollback PC if emulation was unsuccessful
	 */
	if (er == EMULATE_FAIL) {
		vcpu->arch.pc = curr_pc;
	}

	return er;
}
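
/*
 * MMIO loads are likewise completed in user space; the faulting cause
 * and destination GPR are stashed in the vcpu so that
 * kvm_mips_complete_mmio_load() can finish the load (and only then
 * advance the PC) when the VMM re-enters the kernel.
 */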
enum emulation_result
kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;
		break;

	default:
		printk("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));

	mips32_SyncICache(CKSEG0ADDR(pa), 32);

	return 0;
}

#define MIPS_CACHE_OP_INDEX_INV		0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG	0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG	0x2
#define MIPS_CACHE_OP_IMP		0x3
#define MIPS_CACHE_OP_HIT_INV		0x4
#define MIPS_CACHE_OP_FILL_WB_INV	0x5
#define MIPS_CACHE_OP_HIT_HB		0x6
#define MIPS_CACHE_OP_FETCH_LOCK	0x7

#define MIPS_CACHE_ICACHE		0x0
#define MIPS_CACHE_DCACHE		0x1
#define MIPS_CACHE_SEC			0x3
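
/*
 * A CACHE opcode splits its rt field in two: bits 17..16 select the
 * cache (I, D or secondary) and bits 20..18 select the operation
 * (index invalidate, hit invalidate, fill/writeback-invalidate, ...),
 * matching the decode of "cache" and "op" below.
 */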
enum emulation_result
kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	extern void (*r4k_blast_dcache) (void);
	extern void (*r4k_blast_icache) (void);
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = inst & 0xffff;
	cache = (inst >> 16) & 0x3;
	op = (inst >> 18) & 0x7;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * INDEX_INV is issued by Linux on startup to invalidate the caches
	 * entirely by stepping through all the ways/indexes; rather than
	 * emulating each index operation, blast the whole affected cache.
	 */
	if (op == MIPS_CACHE_OP_INDEX_INV) {
		kvm_debug
		    ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
		     arch->gprs[base], offset);

		if (cache == MIPS_CACHE_DCACHE)
			r4k_blast_dcache();
		else if (cache == MIPS_CACHE_ICACHE)
			r4k_blast_icache();
		else {
			printk("%s: unsupported CACHE INDEX operation\n",
			       __func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
		}
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
			goto skip_fault;
		}

		/*
		 * If the address is not in the guest TLB, give the guest a
		 * fault; the resulting guest handler will do the right thing.
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & ASID_MASK));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

			/*
			 * If the entry is not valid, set up a TLB invalid
			 * exception to the guest.
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/* Fault the entry from the guest TLB into the shadow host TLB */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		printk
		    ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (cache == MIPS_CACHE_DCACHE
	    && (op == MIPS_CACHE_OP_FILL_WB_INV
		|| op == MIPS_CACHE_OP_HIT_INV)) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction with a SYNCI; not identical,
		 * but it avoids a trap.
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		printk
		    ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		     cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

	preempt_enable();

dont_update_pc:
	/*
	 * Rollback PC
	 */
	vcpu->arch.pc = curr_pc;

done:
	return er;
}
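
/*
 * Top-level instruction emulator: fetch the faulting instruction (from
 * the delay slot when Cause.BD is set) and dispatch on its major
 * opcode.
 */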
enum emulation_result
kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
		      struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD) {
		opc += 1;
	}

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
	default:
		printk("Instruction emulation not supported (%p/%#x)\n", opc,
		       inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
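
/*
 * The helpers below deliver an exception to the guest: save the PC in
 * guest EPC, set Status.EXL, mirror the branch-delay state into
 * Cause.BD, set the Cause exception code, and redirect the guest to
 * its exception vector.
 */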
enum emulation_result
kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_SYSCALL << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		printk("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
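
/*
 * A TLB refill miss taken while guest Status.EXL is clear uses the
 * special refill vector at offset 0x0; with EXL already set it goes to
 * the general exception vector at offset 0x180 like everything else.
 */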
enum emulation_result
kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
			    struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
			   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_TLB_ST_MISS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result
kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
		       struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	int index;

	/*
	 * If the address is not in the guest TLB, then we are in trouble.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}

enum emulation_result
kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return er;
}

enum emulation_result
kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_COP_UNUSABLE << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return er;
}

enum emulation_result
kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_RES_INST << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result
kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (T_BREAK << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		printk("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
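
/*
 * RDHWR is the SPEC3 "read hardware register" instruction used by
 * guest user space (notably register 29, UserLocal, for thread-local
 * storage).  Since the guest runs unprivileged, the instruction traps
 * as a reserved instruction and is emulated here when the guest's
 * HWREna register permits it.
 */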
enum emulation_result
kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
		   struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		printk("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			printk("RDHWR: Count register\n");
			arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
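
/*
 * Complete an MMIO load once user space has filled in run->mmio.data.
 * vcpu->mmio_needed encodes the extension choice made at emulation
 * time: 2 means sign-extend (lb/lh), 1 means zero-extend (lbu/lhu).
 */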
enum emulation_result
kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;

	if (run->mmio.len > sizeof(*gpr)) {
		printk("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *) run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug
		    ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
		     vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
		     vcpu->mmio_needed);

done:
	return er;
}

static enum emulation_result
kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
		     struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		printk("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
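
/*
 * Privilege check: the guest kernel runs unprivileged from the host's
 * point of view, so faults the guest's user mode takes on guest kernel
 * segments must not be handled on its behalf; they are converted to
 * address errors and delivered back to the guest.
 */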
enum emulation_result
kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
			 struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case T_INT:
		case T_SYSCALL:
		case T_BREAK:
		case T_RES_INST:
			break;

		case T_COP_UNUSABLE:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case T_TLB_MOD:
			break;

		case T_TLB_LD_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: LD MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_TLB_ST_MISS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest.
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				printk("%s: ST MISS @ %#lx\n", __func__,
				       badvaddr);
				cause &= ~0xff;
				cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case T_ADDR_ERR_ST:
			printk("%s: address error ST @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case T_ADDR_ERR_LD:
			printk("%s: address error LD @ %#lx\n", __func__,
			       badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL) {
		kvm_mips_emulate_exc(cause, opc, run, vcpu);
	}
	return er;
}

/*
 * A user address (UA) fault can happen in two cases:
 * (1) The TLB entry is not present/valid in both the Guest and shadow host
 *     TLBs; in this case we pass the fault on to the guest kernel and let
 *     it handle it.
 * (2) The TLB entry is present in the Guest TLB but not in the shadow host
 *     TLB; in this case we inject the entry from the Guest TLB into the
 *     shadow host TLB.
 */
enum emulation_result
kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
			struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB.  Check the Guest TLB: if the entry is not there,
	 * send the guest an exception; the guest exception handler should
	 * then inject an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
					  (va & VPN2_MASK) |
					  (kvm_read_c0_guest_entryhi
					   (vcpu->arch.cop0) & ASID_MASK));
	if (index < 0) {
		if (exccode == T_TLB_LD_MISS) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == T_TLB_ST_MISS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			printk("%s: invalid exc code: %d\n", __func__, exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * If the entry is not valid, set up a TLB invalid exception
		 * to the guest.
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == T_TLB_LD_MISS) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == T_TLB_ST_MISS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				printk("%s: invalid exc code: %d\n", __func__,
				       exccode);
				er = EMULATE_FAIL;
			}
		} else {
#ifdef DEBUG
			kvm_debug
			    ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
			     tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
#endif
			/*
			 * OK, we have a Guest TLB entry; inject it into the
			 * shadow host TLB.
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
							     NULL);
		}
	}

	return er;
}