mmu.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
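
/*
 * ASID helpers: return the physical ASID to use on the current CPU for
 * guest kernel mode and guest user mode respectively, masked to the
 * ASID width supported by this CPU.
 */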
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();

        return vcpu->arch.guest_kernel_asid[cpu] &
                        cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();

        return vcpu->arch.guest_user_asid[cpu] &
                        cpu_asid_mask(&cpu_data[cpu]);
}
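
/*
 * kvm_mips_map_page() - Fault in the host page backing a guest frame.
 * @kvm:  KVM pointer.
 * @gfn:  Guest frame number.
 *
 * Look up the host pfn backing guest frame @gfn and cache it in the guest
 * physical page table (guest_pmap), unless it is already mapped.
 *
 * Returns 0 on success, or -EFAULT if no host page backs the frame.
 */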
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
        int srcu_idx, err = 0;
        kvm_pfn_t pfn;

        if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
                return 0;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        pfn = gfn_to_pfn(kvm, gfn);

        if (is_error_noslot_pfn(pfn)) {
                kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
                err = -EFAULT;
                goto out;
        }

        kvm->arch.guest_pmap[gfn] = pfn;
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
                                                    unsigned long gva)
{
        gfn_t gfn;
        unsigned long offset = gva & ~PAGE_MASK;
        struct kvm *kvm = vcpu->kvm;

        if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
                kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
                        __builtin_return_address(0), gva);
                return KVM_INVALID_PAGE;
        }

        gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
                        gva);
                return KVM_INVALID_PAGE;
        }

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return KVM_INVALID_ADDR;

        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
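
/*
 * Handle a TLB refill fault on a guest KSEG0 address by mapping the
 * even/odd pair of guest frames around the faulting page and writing a
 * matching even/odd entry into the host TLB.
 */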
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu)
{
        gfn_t gfn;
        kvm_pfn_t pfn0, pfn1;
        unsigned long vaddr = 0;
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        const int flush_dcache_mask = 0;
        int ret;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
        if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
                        gfn, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }
        vaddr = badvaddr & (PAGE_MASK << 1);

        if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
                return -1;
        if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
                return -1;

        pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
        pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                ENTRYLO_D | ENTRYLO_V;
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                ENTRYLO_D | ENTRYLO_V;

        preempt_disable();
        entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
        ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                      flush_dcache_mask);
        preempt_enable();

        return ret;
}
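
/*
 * Handle a TLB fault in a guest mapped segment by mirroring the matching
 * guest TLB entry into the host TLB, with the guest physical frames
 * replaced by the backing host frames. The half of an entry that would
 * shadow the commpage is cleared first so commpage accesses keep working.
 */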
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb)
{
        unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
        struct kvm *kvm = vcpu->kvm;
        kvm_pfn_t pfn0, pfn1;
        gfn_t gfn0, gfn1;
        long tlb_lo[2];
        int ret;

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the
         * guest TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
                        VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

        gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
        gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
        if (gfn0 >= kvm->arch.guest_pmap_npages ||
            gfn1 >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
                        __func__, gfn0, gfn1, tlb->tlb_hi);
                kvm_mips_dump_guest_tlbs(vcpu);
                return -1;
        }

        if (kvm_mips_map_page(kvm, gfn0) < 0)
                return -1;
        if (kvm_mips_map_page(kvm, gfn1) < 0)
                return -1;

        pfn0 = kvm->arch.guest_pmap[gfn0];
        pfn1 = kvm->arch.guest_pmap[gfn1];

        /* Get attributes from the Guest TLB */
        entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                (tlb_lo[0] & ENTRYLO_D) |
                (tlb_lo[0] & ENTRYLO_V);
        entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
                ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
                (tlb_lo[1] & ENTRYLO_D) |
                (tlb_lo[1] & ENTRYLO_V);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);

        preempt_disable();
        entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
                                               kvm_mips_get_kernel_asid(vcpu) :
                                               kvm_mips_get_user_asid(vcpu));
        ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                      tlb->tlb_mask);
        preempt_enable();

        return ret;
}
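
/*
 * Allocate a fresh ASID for @mm on @cpu. When the ASID space wraps, the
 * local TLB (and a virtually tagged icache, if present) is flushed to
 * start a new ASID cycle.
 */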
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
{
        unsigned long asid = asid_cache(cpu);

        asid += cpu_asid_inc();
        if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();

                kvm_local_flush_tlb_all();      /* start new asid cycle */

                if (!asid)      /* fix version if needed */
                        asid = asid_first_version(cpu);
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:  Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
        unsigned long flags;
        int newasid = 0;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        /* Allocate new kernel and user ASIDs if needed */

        local_irq_save(flags);

        if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                        vcpu->arch.guest_kernel_mm.context.asid[cpu];
                newasid++;

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
                          cpu, vcpu->arch.guest_kernel_asid[cpu]);
        }

        if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
                                                asid_version_mask(cpu)) {
                u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
                                KVM_ENTRYHI_ASID;

                kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
                vcpu->arch.guest_user_asid[cpu] =
                        vcpu->arch.guest_user_mm.context.asid[cpu];
                vcpu->arch.last_user_gasid = gasid;
                newasid++;

                kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
                          cpu_context(cpu, current->mm));
                kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
                          vcpu->arch.guest_user_asid[cpu]);
        }

        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        if (!newasid) {
                /*
                 * If we preempted while the guest was executing, then reload
                 * the pre-empted ASID.
                 */
                if (current->flags & PF_VCPU) {
                        write_c0_entryhi(vcpu->arch.
                                         preempt_entryhi & asid_mask);
                        ehb();
                }
        } else {
                /* New ASIDs were allocated for the VM */

                /*
                 * Were we in guest context? If so, the pre-empted ASID is no
                 * longer valid; we need to set it to what it should be based
                 * on the mode of the Guest (Kernel/User).
                 */
                if (current->flags & PF_VCPU) {
                        if (KVM_GUEST_KERNEL_MODE(vcpu))
                                write_c0_entryhi(vcpu->arch.
                                                 guest_kernel_asid[cpu] &
                                                 asid_mask);
                        else
                                write_c0_entryhi(vcpu->arch.
                                                 guest_user_asid[cpu] &
                                                 asid_mask);
                        ehb();
                }
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_set_regs(vcpu);

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();

        vcpu->arch.preempt_entryhi = read_c0_entryhi();
        vcpu->arch.last_sched_cpu = cpu;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_get_regs(vcpu);

        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu))) {
                kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
                          cpu_context(cpu, current->mm));
                drop_mmu_context(current->mm, cpu);
        }
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        ehb();

        local_irq_restore(flags);
}
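
/*
 * Fetch the guest instruction at @opc. KSEG0 addresses are translated to
 * a host physical address and read via a temporary kernel mapping, while
 * mapped-segment addresses are read through the host TLB, faulting the
 * guest TLB entry in first if necessary.
 */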
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long paddr, flags, vpn2, asid;
        unsigned long va = (unsigned long)opc;
        void *vaddr;
        u32 inst;
        int index;

        if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
            KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                local_irq_save(flags);
                index = kvm_mips_host_tlb_lookup(vcpu, va);
                if (index >= 0) {
                        inst = *(opc);
                } else {
                        vpn2 = va & VPN2_MASK;
                        asid = kvm_read_c0_guest_entryhi(cop0) &
                                KVM_ENTRYHI_ASID;
                        index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
                        if (index < 0) {
                                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, vcpu, read_c0_entryhi());
                                kvm_mips_dump_host_tlbs();
                                kvm_mips_dump_guest_tlbs(vcpu);
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                                                &vcpu->arch.guest_tlb[index])) {
                                kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
                                        __func__, opc, index, vcpu,
                                        read_c0_entryhi());
                                kvm_mips_dump_guest_tlbs(vcpu);
                                local_irq_restore(flags);
                                return KVM_INVALID_INST;
                        }
                        inst = *(opc);
                }
                local_irq_restore(flags);
        } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
                vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
                vaddr += paddr & ~PAGE_MASK;
                inst = *(u32 *)vaddr;
                kunmap_atomic(vaddr);
        } else {
                kvm_err("%s: illegal address: %p\n", __func__, opc);
                return KVM_INVALID_INST;
        }

        return inst;
}