/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

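/*
 * Guest kernel and guest user address spaces get separate host ASIDs,
 * allocated per CPU. These helpers return the ASID to place in EntryHi
 * on the current CPU, masked to the CPU's ASID width.
 */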
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

/* Note: despite the name, this returns the reserved commpage TLB *index* */
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Dump the host TLB contents to the kernel log, for debugging */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

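/* Dump the guest's software-managed TLB (vcpu->arch.guest_tlb) to the log */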
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
			 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

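/*
 * Pin the host page backing guest physical frame @gfn and record its pfn
 * in the guest physical page map (guest_pmap). Returns 0 on success or if
 * the gfn is already mapped, -EFAULT if no host page could be obtained.
 */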
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Valid indices are 0..tlbsize-1; the probe returns < 0 on miss */
	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

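/*
 * Handle a TLB miss on a guest KSEG0 address: map the even/odd guest
 * physical page pair and install a host TLB entry for it under the guest
 * kernel ASID.
 */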
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	/* EntryLo bits: CCA = 3 (cacheable), D (dirty) and V (valid) set */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

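/*
 * Install a host TLB mapping for the vcpu's commpage (kseg0_commpage) at
 * the reserved commpage TLB index, with only the even (lo0) half valid.
 */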
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

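/*
 * Handle a fault on a guest address mapped through the guest TLB: mirror
 * the matching guest TLB entry into the host TLB, treating the entry's
 * output addresses as guest physical and backing them with host pfns
 * from the guest physical page map.
 */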
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

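/* Search the guest TLB for an entry matching the VPN2 and ASID in @entryhi */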
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the TLB array if a matching entry was found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

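/*
 * Probe the host TLB for @vaddr under the vcpu's current guest ASID.
 * Returns the matching index, or < 0 on miss.
 */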
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

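/* Invalidate the host TLB entry (if any) matching @va under the user ASID */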
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry; the probe returns < 0 on miss */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

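/*
 * Invalidate the whole host TLB, optionally preserving entries that map
 * guest KSEG0 (the guest kernel's unmapped segment).
 */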
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

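/*
 * Allocate a new ASID for @mm on @cpu, starting a new ASID cycle (and
 * flushing the TLB) when the ASID space wraps.
 */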
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid; we need to set it to what it should be
		 * based on the mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

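/*
 * Fetch the guest instruction at @opc, faulting the mapping into the host
 * TLB first if necessary so the read can proceed.
 */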
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							      (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);