/* kvm_trap_emul.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
  7. *
  8. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  9. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10. */
  11. #include <linux/errno.h>
  12. #include <linux/err.h>
  13. #include <linux/module.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/kvm_host.h>
  16. #include "kvm_mips_opcode.h"
  17. #include "kvm_mips_int.h"
  18. static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
  19. {
  20. gpa_t gpa;
  21. uint32_t kseg = KSEGX(gva);
  22. if ((kseg == CKSEG0) || (kseg == CKSEG1))
  23. gpa = CPHYSADDR(gva);
  24. else {
  25. printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
  26. kvm_mips_dump_host_tlbs();
  27. gpa = KVM_INVALID_ADDR;
  28. }
  29. kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
  30. return gpa;
  31. }
  32. static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
  33. {
  34. struct kvm_run *run = vcpu->run;
  35. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  36. unsigned long cause = vcpu->arch.host_cp0_cause;
  37. enum emulation_result er = EMULATE_DONE;
  38. int ret = RESUME_GUEST;
  39. if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
  40. er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
  41. } else
  42. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  43. switch (er) {
  44. case EMULATE_DONE:
  45. ret = RESUME_GUEST;
  46. break;
  47. case EMULATE_FAIL:
  48. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  49. ret = RESUME_HOST;
  50. break;
  51. case EMULATE_WAIT:
  52. run->exit_reason = KVM_EXIT_INTR;
  53. ret = RESUME_HOST;
  54. break;
  55. default:
  56. BUG();
  57. }
  58. return ret;
  59. }
  60. static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
  61. {
  62. struct kvm_run *run = vcpu->run;
  63. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  64. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  65. unsigned long cause = vcpu->arch.host_cp0_cause;
  66. enum emulation_result er = EMULATE_DONE;
  67. int ret = RESUME_GUEST;
  68. if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  69. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  70. kvm_debug
  71. ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
  72. cause, opc, badvaddr);
  73. er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
  74. if (er == EMULATE_DONE)
  75. ret = RESUME_GUEST;
  76. else {
  77. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  78. ret = RESUME_HOST;
  79. }
  80. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  81. /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
  82. * using HIGHMEM. Need to address this in a HIGHMEM kernel
  83. */
  84. printk
  85. ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
  86. cause, opc, badvaddr);
  87. kvm_mips_dump_host_tlbs();
  88. kvm_arch_vcpu_dump_regs(vcpu);
  89. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  90. ret = RESUME_HOST;
  91. } else {
  92. printk
  93. ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  94. cause, opc, badvaddr);
  95. kvm_mips_dump_host_tlbs();
  96. kvm_arch_vcpu_dump_regs(vcpu);
  97. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  98. ret = RESUME_HOST;
  99. }
  100. return ret;
  101. }
  102. static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
  103. {
  104. struct kvm_run *run = vcpu->run;
  105. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  106. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  107. unsigned long cause = vcpu->arch.host_cp0_cause;
  108. enum emulation_result er = EMULATE_DONE;
  109. int ret = RESUME_GUEST;
  110. if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
  111. && KVM_GUEST_KERNEL_MODE(vcpu)) {
  112. if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
  113. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  114. ret = RESUME_HOST;
  115. }
  116. } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  117. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  118. kvm_debug
  119. ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
  120. cause, opc, badvaddr);
  121. er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
  122. if (er == EMULATE_DONE)
  123. ret = RESUME_GUEST;
  124. else {
  125. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  126. ret = RESUME_HOST;
  127. }
  128. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  129. /* All KSEG0 faults are handled by KVM, as the guest kernel does not
  130. * expect to ever get them
  131. */
  132. if (kvm_mips_handle_kseg0_tlb_fault
  133. (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
  134. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  135. ret = RESUME_HOST;
  136. }
  137. } else {
  138. kvm_err
  139. ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  140. cause, opc, badvaddr);
  141. kvm_mips_dump_host_tlbs();
  142. kvm_arch_vcpu_dump_regs(vcpu);
  143. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  144. ret = RESUME_HOST;
  145. }
  146. return ret;
  147. }
  148. static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
  149. {
  150. struct kvm_run *run = vcpu->run;
  151. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  152. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  153. unsigned long cause = vcpu->arch.host_cp0_cause;
  154. enum emulation_result er = EMULATE_DONE;
  155. int ret = RESUME_GUEST;
  156. if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
  157. && KVM_GUEST_KERNEL_MODE(vcpu)) {
  158. if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
  159. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  160. ret = RESUME_HOST;
  161. }
  162. } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  163. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  164. kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
  165. vcpu->arch.pc, badvaddr);
  166. /* User Address (UA) fault, this could happen if
  167. * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
  168. * case we pass on the fault to the guest kernel and let it handle it.
  169. * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
  170. * case we inject the TLB from the Guest TLB into the shadow host TLB
  171. */
  172. er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
  173. if (er == EMULATE_DONE)
  174. ret = RESUME_GUEST;
  175. else {
  176. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  177. ret = RESUME_HOST;
  178. }
  179. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  180. if (kvm_mips_handle_kseg0_tlb_fault
  181. (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
  182. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  183. ret = RESUME_HOST;
  184. }
  185. } else {
  186. printk
  187. ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  188. cause, opc, badvaddr);
  189. kvm_mips_dump_host_tlbs();
  190. kvm_arch_vcpu_dump_regs(vcpu);
  191. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  192. ret = RESUME_HOST;
  193. }
  194. return ret;
  195. }
  196. static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
  197. {
  198. struct kvm_run *run = vcpu->run;
  199. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  200. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  201. unsigned long cause = vcpu->arch.host_cp0_cause;
  202. enum emulation_result er = EMULATE_DONE;
  203. int ret = RESUME_GUEST;
  204. if (KVM_GUEST_KERNEL_MODE(vcpu)
  205. && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
  206. kvm_debug("Emulate Store to MMIO space\n");
  207. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  208. if (er == EMULATE_FAIL) {
  209. printk("Emulate Store to MMIO space failed\n");
  210. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  211. ret = RESUME_HOST;
  212. } else {
  213. run->exit_reason = KVM_EXIT_MMIO;
  214. ret = RESUME_HOST;
  215. }
  216. } else {
  217. printk
  218. ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
  219. cause, opc, badvaddr);
  220. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  221. ret = RESUME_HOST;
  222. }
  223. return ret;
  224. }
  225. static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
  226. {
  227. struct kvm_run *run = vcpu->run;
  228. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  229. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  230. unsigned long cause = vcpu->arch.host_cp0_cause;
  231. enum emulation_result er = EMULATE_DONE;
  232. int ret = RESUME_GUEST;
  233. if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
  234. kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
  235. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  236. if (er == EMULATE_FAIL) {
  237. printk("Emulate Load from MMIO space failed\n");
  238. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  239. ret = RESUME_HOST;
  240. } else {
  241. run->exit_reason = KVM_EXIT_MMIO;
  242. ret = RESUME_HOST;
  243. }
  244. } else {
  245. printk
  246. ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
  247. cause, opc, badvaddr);
  248. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  249. ret = RESUME_HOST;
  250. er = EMULATE_FAIL;
  251. }
  252. return ret;
  253. }
  254. static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
  255. {
  256. struct kvm_run *run = vcpu->run;
  257. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  258. unsigned long cause = vcpu->arch.host_cp0_cause;
  259. enum emulation_result er = EMULATE_DONE;
  260. int ret = RESUME_GUEST;
  261. er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
  262. if (er == EMULATE_DONE)
  263. ret = RESUME_GUEST;
  264. else {
  265. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  266. ret = RESUME_HOST;
  267. }
  268. return ret;
  269. }
  270. static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
  271. {
  272. struct kvm_run *run = vcpu->run;
  273. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  274. unsigned long cause = vcpu->arch.host_cp0_cause;
  275. enum emulation_result er = EMULATE_DONE;
  276. int ret = RESUME_GUEST;
  277. er = kvm_mips_handle_ri(cause, opc, run, vcpu);
  278. if (er == EMULATE_DONE)
  279. ret = RESUME_GUEST;
  280. else {
  281. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  282. ret = RESUME_HOST;
  283. }
  284. return ret;
  285. }
  286. static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
  287. {
  288. struct kvm_run *run = vcpu->run;
  289. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  290. unsigned long cause = vcpu->arch.host_cp0_cause;
  291. enum emulation_result er = EMULATE_DONE;
  292. int ret = RESUME_GUEST;
  293. er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
  294. if (er == EMULATE_DONE)
  295. ret = RESUME_GUEST;
  296. else {
  297. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  298. ret = RESUME_HOST;
  299. }
  300. return ret;
  301. }
  302. static int kvm_trap_emul_vm_init(struct kvm *kvm)
  303. {
  304. return 0;
  305. }
  306. static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
  307. {
  308. return 0;
  309. }
  310. static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
  311. {
  312. struct mips_coproc *cop0 = vcpu->arch.cop0;
  313. uint32_t config1;
  314. int vcpu_id = vcpu->vcpu_id;
  315. /* Arch specific stuff, set up config registers properly so that the
  316. * guest will come up as expected, for now we simulate a
  317. * MIPS 24kc
  318. */
  319. kvm_write_c0_guest_prid(cop0, 0x00019300);
  320. kvm_write_c0_guest_config(cop0,
  321. MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
  322. (MMU_TYPE_R4000 << CP0C0_MT));
  323. /* Read the cache characteristics from the host Config1 Register */
  324. config1 = (read_c0_config1() & ~0x7f);
  325. /* Set up MMU size */
  326. config1 &= ~(0x3f << 25);
  327. config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
  328. /* We unset some bits that we aren't emulating */
  329. config1 &=
  330. ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
  331. (1 << CP0C1_WR) | (1 << CP0C1_CA));
  332. kvm_write_c0_guest_config1(cop0, config1);
  333. kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
  334. /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
  335. kvm_write_c0_guest_config3(cop0,
  336. MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
  337. CP0C3_ULRI));
  338. /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
  339. kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
  340. /* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) */
  341. kvm_write_c0_guest_intctl(cop0, 0xFC000000);
  342. /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
  343. kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
  344. return 0;
  345. }
  346. static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
  347. const struct kvm_one_reg *reg,
  348. s64 *v)
  349. {
  350. switch (reg->id) {
  351. case KVM_REG_MIPS_CP0_COUNT:
  352. *v = kvm_mips_read_count(vcpu);
  353. break;
  354. case KVM_REG_MIPS_COUNT_CTL:
  355. *v = vcpu->arch.count_ctl;
  356. break;
  357. case KVM_REG_MIPS_COUNT_RESUME:
  358. *v = ktime_to_ns(vcpu->arch.count_resume);
  359. break;
  360. case KVM_REG_MIPS_COUNT_HZ:
  361. *v = vcpu->arch.count_hz;
  362. break;
  363. default:
  364. return -EINVAL;
  365. }
  366. return 0;
  367. }
  368. static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
  369. const struct kvm_one_reg *reg,
  370. s64 v)
  371. {
  372. struct mips_coproc *cop0 = vcpu->arch.cop0;
  373. int ret = 0;
  374. switch (reg->id) {
  375. case KVM_REG_MIPS_CP0_COUNT:
  376. kvm_mips_write_count(vcpu, v);
  377. break;
  378. case KVM_REG_MIPS_CP0_COMPARE:
  379. kvm_mips_write_compare(vcpu, v);
  380. break;
  381. case KVM_REG_MIPS_CP0_CAUSE:
  382. /*
  383. * If the timer is stopped or started (DC bit) it must look
  384. * atomic with changes to the interrupt pending bits (TI, IRQ5).
  385. * A timer interrupt should not happen in between.
  386. */
  387. if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
  388. if (v & CAUSEF_DC) {
  389. /* disable timer first */
  390. kvm_mips_count_disable_cause(vcpu);
  391. kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
  392. } else {
  393. /* enable timer last */
  394. kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
  395. kvm_mips_count_enable_cause(vcpu);
  396. }
  397. } else {
  398. kvm_write_c0_guest_cause(cop0, v);
  399. }
  400. break;
  401. case KVM_REG_MIPS_COUNT_CTL:
  402. ret = kvm_mips_set_count_ctl(vcpu, v);
  403. break;
  404. case KVM_REG_MIPS_COUNT_RESUME:
  405. ret = kvm_mips_set_count_resume(vcpu, v);
  406. break;
  407. case KVM_REG_MIPS_COUNT_HZ:
  408. ret = kvm_mips_set_count_hz(vcpu, v);
  409. break;
  410. default:
  411. return -EINVAL;
  412. }
  413. return ret;
  414. }
  415. static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
  416. /* exit handlers */
  417. .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
  418. .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
  419. .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
  420. .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
  421. .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
  422. .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
  423. .handle_syscall = kvm_trap_emul_handle_syscall,
  424. .handle_res_inst = kvm_trap_emul_handle_res_inst,
  425. .handle_break = kvm_trap_emul_handle_break,
  426. .vm_init = kvm_trap_emul_vm_init,
  427. .vcpu_init = kvm_trap_emul_vcpu_init,
  428. .vcpu_setup = kvm_trap_emul_vcpu_setup,
  429. .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
  430. .queue_timer_int = kvm_mips_queue_timer_int_cb,
  431. .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
  432. .queue_io_int = kvm_mips_queue_io_int_cb,
  433. .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
  434. .irq_deliver = kvm_mips_irq_deliver_cb,
  435. .irq_clear = kvm_mips_irq_clear_cb,
  436. .get_one_reg = kvm_trap_emul_get_one_reg,
  437. .set_one_reg = kvm_trap_emul_set_one_reg,
  438. };
  439. int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
  440. {
  441. *install_callbacks = &kvm_trap_emul_callbacks;
  442. return 0;
  443. }