trap_emul.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
  7. *
  8. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  9. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10. */
  11. #include <linux/errno.h>
  12. #include <linux/err.h>
  13. #include <linux/module.h>
  14. #include <linux/vmalloc.h>
  15. #include <linux/kvm_host.h>
  16. #include "opcode.h"
  17. #include "interrupt.h"
  18. static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
  19. {
  20. gpa_t gpa;
  21. uint32_t kseg = KSEGX(gva);
  22. if ((kseg == CKSEG0) || (kseg == CKSEG1))
  23. gpa = CPHYSADDR(gva);
  24. else {
  25. kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
  26. kvm_mips_dump_host_tlbs();
  27. gpa = KVM_INVALID_ADDR;
  28. }
  29. kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
  30. return gpa;
  31. }
  32. static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
  33. {
  34. struct kvm_run *run = vcpu->run;
  35. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  36. unsigned long cause = vcpu->arch.host_cp0_cause;
  37. enum emulation_result er = EMULATE_DONE;
  38. int ret = RESUME_GUEST;
  39. if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
  40. er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
  41. else
  42. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  43. switch (er) {
  44. case EMULATE_DONE:
  45. ret = RESUME_GUEST;
  46. break;
  47. case EMULATE_FAIL:
  48. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  49. ret = RESUME_HOST;
  50. break;
  51. case EMULATE_WAIT:
  52. run->exit_reason = KVM_EXIT_INTR;
  53. ret = RESUME_HOST;
  54. break;
  55. default:
  56. BUG();
  57. }
  58. return ret;
  59. }
  60. static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
  61. {
  62. struct kvm_run *run = vcpu->run;
  63. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  64. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  65. unsigned long cause = vcpu->arch.host_cp0_cause;
  66. enum emulation_result er = EMULATE_DONE;
  67. int ret = RESUME_GUEST;
  68. if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  69. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  70. kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
  71. cause, opc, badvaddr);
  72. er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
  73. if (er == EMULATE_DONE)
  74. ret = RESUME_GUEST;
  75. else {
  76. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  77. ret = RESUME_HOST;
  78. }
  79. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  80. /*
  81. * XXXKYMA: The guest kernel does not expect to get this fault
  82. * when we are not using HIGHMEM. Need to address this in a
  83. * HIGHMEM kernel
  84. */
  85. kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
  86. cause, opc, badvaddr);
  87. kvm_mips_dump_host_tlbs();
  88. kvm_arch_vcpu_dump_regs(vcpu);
  89. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  90. ret = RESUME_HOST;
  91. } else {
  92. kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  93. cause, opc, badvaddr);
  94. kvm_mips_dump_host_tlbs();
  95. kvm_arch_vcpu_dump_regs(vcpu);
  96. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  97. ret = RESUME_HOST;
  98. }
  99. return ret;
  100. }
  101. static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
  102. {
  103. struct kvm_run *run = vcpu->run;
  104. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  105. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  106. unsigned long cause = vcpu->arch.host_cp0_cause;
  107. enum emulation_result er = EMULATE_DONE;
  108. int ret = RESUME_GUEST;
  109. if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
  110. && KVM_GUEST_KERNEL_MODE(vcpu)) {
  111. if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
  112. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  113. ret = RESUME_HOST;
  114. }
  115. } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  116. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  117. kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
  118. cause, opc, badvaddr);
  119. er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
  120. if (er == EMULATE_DONE)
  121. ret = RESUME_GUEST;
  122. else {
  123. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  124. ret = RESUME_HOST;
  125. }
  126. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  127. /*
  128. * All KSEG0 faults are handled by KVM, as the guest kernel does
  129. * not expect to ever get them
  130. */
  131. if (kvm_mips_handle_kseg0_tlb_fault
  132. (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
  133. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  134. ret = RESUME_HOST;
  135. }
  136. } else {
  137. kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  138. cause, opc, badvaddr);
  139. kvm_mips_dump_host_tlbs();
  140. kvm_arch_vcpu_dump_regs(vcpu);
  141. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  142. ret = RESUME_HOST;
  143. }
  144. return ret;
  145. }
  146. static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
  147. {
  148. struct kvm_run *run = vcpu->run;
  149. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  150. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  151. unsigned long cause = vcpu->arch.host_cp0_cause;
  152. enum emulation_result er = EMULATE_DONE;
  153. int ret = RESUME_GUEST;
  154. if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
  155. && KVM_GUEST_KERNEL_MODE(vcpu)) {
  156. if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
  157. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  158. ret = RESUME_HOST;
  159. }
  160. } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
  161. || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
  162. kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
  163. vcpu->arch.pc, badvaddr);
  164. /*
  165. * User Address (UA) fault, this could happen if
  166. * (1) TLB entry not present/valid in both Guest and shadow host
  167. * TLBs, in this case we pass on the fault to the guest
  168. * kernel and let it handle it.
  169. * (2) TLB entry is present in the Guest TLB but not in the
  170. * shadow, in this case we inject the TLB from the Guest TLB
  171. * into the shadow host TLB
  172. */
  173. er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
  174. if (er == EMULATE_DONE)
  175. ret = RESUME_GUEST;
  176. else {
  177. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  178. ret = RESUME_HOST;
  179. }
  180. } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
  181. if (kvm_mips_handle_kseg0_tlb_fault
  182. (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
  183. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  184. ret = RESUME_HOST;
  185. }
  186. } else {
  187. kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
  188. cause, opc, badvaddr);
  189. kvm_mips_dump_host_tlbs();
  190. kvm_arch_vcpu_dump_regs(vcpu);
  191. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  192. ret = RESUME_HOST;
  193. }
  194. return ret;
  195. }
  196. static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
  197. {
  198. struct kvm_run *run = vcpu->run;
  199. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  200. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  201. unsigned long cause = vcpu->arch.host_cp0_cause;
  202. enum emulation_result er = EMULATE_DONE;
  203. int ret = RESUME_GUEST;
  204. if (KVM_GUEST_KERNEL_MODE(vcpu)
  205. && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
  206. kvm_debug("Emulate Store to MMIO space\n");
  207. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  208. if (er == EMULATE_FAIL) {
  209. kvm_err("Emulate Store to MMIO space failed\n");
  210. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  211. ret = RESUME_HOST;
  212. } else {
  213. run->exit_reason = KVM_EXIT_MMIO;
  214. ret = RESUME_HOST;
  215. }
  216. } else {
  217. kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
  218. cause, opc, badvaddr);
  219. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  220. ret = RESUME_HOST;
  221. }
  222. return ret;
  223. }
  224. static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
  225. {
  226. struct kvm_run *run = vcpu->run;
  227. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  228. unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
  229. unsigned long cause = vcpu->arch.host_cp0_cause;
  230. enum emulation_result er = EMULATE_DONE;
  231. int ret = RESUME_GUEST;
  232. if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
  233. kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
  234. er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
  235. if (er == EMULATE_FAIL) {
  236. kvm_err("Emulate Load from MMIO space failed\n");
  237. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  238. ret = RESUME_HOST;
  239. } else {
  240. run->exit_reason = KVM_EXIT_MMIO;
  241. ret = RESUME_HOST;
  242. }
  243. } else {
  244. kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
  245. cause, opc, badvaddr);
  246. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  247. ret = RESUME_HOST;
  248. er = EMULATE_FAIL;
  249. }
  250. return ret;
  251. }
  252. static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
  253. {
  254. struct kvm_run *run = vcpu->run;
  255. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  256. unsigned long cause = vcpu->arch.host_cp0_cause;
  257. enum emulation_result er = EMULATE_DONE;
  258. int ret = RESUME_GUEST;
  259. er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
  260. if (er == EMULATE_DONE)
  261. ret = RESUME_GUEST;
  262. else {
  263. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  264. ret = RESUME_HOST;
  265. }
  266. return ret;
  267. }
  268. static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
  269. {
  270. struct kvm_run *run = vcpu->run;
  271. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  272. unsigned long cause = vcpu->arch.host_cp0_cause;
  273. enum emulation_result er = EMULATE_DONE;
  274. int ret = RESUME_GUEST;
  275. er = kvm_mips_handle_ri(cause, opc, run, vcpu);
  276. if (er == EMULATE_DONE)
  277. ret = RESUME_GUEST;
  278. else {
  279. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  280. ret = RESUME_HOST;
  281. }
  282. return ret;
  283. }
  284. static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
  285. {
  286. struct kvm_run *run = vcpu->run;
  287. uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
  288. unsigned long cause = vcpu->arch.host_cp0_cause;
  289. enum emulation_result er = EMULATE_DONE;
  290. int ret = RESUME_GUEST;
  291. er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
  292. if (er == EMULATE_DONE)
  293. ret = RESUME_GUEST;
  294. else {
  295. run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
  296. ret = RESUME_HOST;
  297. }
  298. return ret;
  299. }
  300. static int kvm_trap_emul_vm_init(struct kvm *kvm)
  301. {
  302. return 0;
  303. }
  304. static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
  305. {
  306. return 0;
  307. }
  308. static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
  309. {
  310. struct mips_coproc *cop0 = vcpu->arch.cop0;
  311. uint32_t config1;
  312. int vcpu_id = vcpu->vcpu_id;
  313. /*
  314. * Arch specific stuff, set up config registers properly so that the
  315. * guest will come up as expected, for now we simulate a MIPS 24kc
  316. */
  317. kvm_write_c0_guest_prid(cop0, 0x00019300);
  318. kvm_write_c0_guest_config(cop0,
  319. MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
  320. (MMU_TYPE_R4000 << CP0C0_MT));
  321. /* Read the cache characteristics from the host Config1 Register */
  322. config1 = (read_c0_config1() & ~0x7f);
  323. /* Set up MMU size */
  324. config1 &= ~(0x3f << 25);
  325. config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
  326. /* We unset some bits that we aren't emulating */
  327. config1 &=
  328. ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
  329. (1 << CP0C1_WR) | (1 << CP0C1_CA));
  330. kvm_write_c0_guest_config1(cop0, config1);
  331. kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
  332. /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
  333. kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
  334. (1 << CP0C3_ULRI));
  335. /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
  336. kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
  337. /*
  338. * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
  339. */
  340. kvm_write_c0_guest_intctl(cop0, 0xFC000000);
  341. /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
  342. kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
  343. return 0;
  344. }
  345. static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
  346. const struct kvm_one_reg *reg,
  347. s64 *v)
  348. {
  349. switch (reg->id) {
  350. case KVM_REG_MIPS_CP0_COUNT:
  351. *v = kvm_mips_read_count(vcpu);
  352. break;
  353. case KVM_REG_MIPS_COUNT_CTL:
  354. *v = vcpu->arch.count_ctl;
  355. break;
  356. case KVM_REG_MIPS_COUNT_RESUME:
  357. *v = ktime_to_ns(vcpu->arch.count_resume);
  358. break;
  359. case KVM_REG_MIPS_COUNT_HZ:
  360. *v = vcpu->arch.count_hz;
  361. break;
  362. default:
  363. return -EINVAL;
  364. }
  365. return 0;
  366. }
/*
 * Write one of the registers this backend handles specially (the emulated
 * Count/Compare/Cause registers and the count control pseudo-registers).
 * Returns 0 on success, a negative error from the count helpers, or
 * -EINVAL for registers handled elsewhere.
 */
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			/* DC bit is changing; order matters (see above). */
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			/* DC unchanged: plain write is safe. */
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
/* Callback table for the trap & emulate virtualisation backend. */
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,

	/* setup */
	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,

	/* interrupt handling (shared helpers from interrupt.h) */
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,

	/* register access */
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
};
  438. int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
  439. {
  440. *install_callbacks = &kvm_trap_emul_callbacks;
  441. return 0;
  442. }