trap_emul.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>

#include "interrupt.h"
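
/*
 * Translate a guest virtual address to a guest physical address. Only
 * the unmapped segments KSEG0/KSEG1 can be translated directly, by
 * masking off the segment bits with CPHYSADDR(); anything else should
 * have been resolved through the TLB before we get here.
 */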
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
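
/*
 * Coprocessor Unusable exception: the CE field of Cause identifies the
 * coprocessor that was referenced. CE == 1 (COP1/FPU) either hands the
 * real FPU to the guest or delivers a COP1 Unusable exception to it;
 * any other coprocessor access is emulated as an instruction.
 */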
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
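
/*
 * TLB Modified exception: the guest wrote to a page whose TLB entry is
 * not marked dirty. Faults on guest user or KSEG2/3 addresses are
 * forwarded to the TLB-mod emulator; faults anywhere else are fatal.
 */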
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
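
/*
 * TLB Store miss: no matching TLB entry for a guest store. Commpage
 * accesses in guest kernel mode are wired up directly; user and
 * KSEG2/3 misses go to the TLB miss emulator; KSEG0 misses are handled
 * by KVM itself since the guest kernel never expects to see them.
 */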
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
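
/*
 * TLB Load miss: same structure as the store-miss handler above, but
 * for loads and instruction fetches.
 */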
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
			  vcpu->arch.pc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
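
/*
 * Address Error on a store: in guest kernel mode, an access to the
 * unmapped CKSEG0/CKSEG1 windows is interpreted as a device (MMIO)
 * access, emulated, and completed in userland via KVM_EXIT_MMIO.
 */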
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
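
/*
 * Address Error on a load: the load-side counterpart of the store
 * handler above; loads from CKSEG0/CKSEG1 are emulated as MMIO reads.
 */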
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}
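
/*
 * The handlers below all share one shape: delegate to the emulator to
 * deliver the corresponding exception to the guest, resume the guest
 * on success, and bail out to userland on failure.
 */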
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
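
/* No VM-wide or per-VCPU state needs setting up for trap & emulate. */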
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a MIPS 24kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	/* Have config1, Cacheable, noncoherent, write-back, write allocate */
	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
				  (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}
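
/*
 * Read one of the registers that needs implementation-specific
 * handling: the virtualised CP0 Count timer and its control state.
 */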
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
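
/*
 * Write one of the registers that needs implementation-specific
 * handling. Timer registers go through the Count/Compare helpers so
 * the virtual timer stays consistent, and Config register writes are
 * filtered through their writability masks.
 */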
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
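
/*
 * kvm_lose_fpu() flushes any live FPU/MSA context back into vcpu->arch
 * so that the register state about to be read out is up to date.
 */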
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};
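
/*
 * Entry point used by the core MIPS KVM code to install this
 * implementation's callback table.
 */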
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}