kvm_tlb.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#define PRIx64 "llx"

atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
EXPORT_SYMBOL(kvm_mips_is_error_pfn);
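
/* Return the host ASID in use for the guest's kernel mode on this CPU. */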
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
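
/* Return the host ASID in use for the guest's user mode on this CPU. */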
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
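
/*
 * Despite the name, this returns the host TLB index reserved for the
 * commpage mapping, not an ASID.
 */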
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/*
 * Dump every entry in the host TLB. EntryHi and PageMask are saved and
 * restored around the walk, which runs with interrupts disabled so it
 * cannot race with TLB updates.
 */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
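
/* Dump the guest's software TLB, as maintained in vcpu->arch.guest_tlb. */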
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	printk("Guest TLBs:\n");
	printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
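
/*
 * Lazily fault in the host page backing @gfn and record its pfn in the
 * guest physical-page map. Returns 0 on success or -EFAULT if no page
 * could be obtained.
 */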
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/*
 * XXXKYMA: Must be called with interrupts disabled.
 * Set flush_dcache_mask == 0 if no dcache flush is required.
 */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		/* Restore the old ASID and IRQ state before bailing out */
		write_c0_entryhi(old_entryhi);
		mtc0_tlbw_hazard();
		local_irq_restore(flags);
		return -1;
	}

	/* No matching entry found: overwrite a random one */
	if (idx < 0) {
		idx = read_c0_random() % current_cpu_data.tlbsize;
		write_c0_index(idx);
		mtc0_tlbw_hazard();
	}
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
		  "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/*
	 * Map the faulting page and its even/odd neighbour, since one host
	 * TLB entry covers an (even, odd) pair of guest pages.
	 */
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	/* Cacheable (C=3), dirty (D) and valid (V) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
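
/*
 * Install the TLB entry that maps the guest commpage, written at the
 * fixed host TLB index reserved for it.
 */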
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	/* Cacheable (C=3), dirty (D) and valid (V) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}
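
/*
 * Install a host TLB entry for a guest mapped-segment access, taking the
 * page attributes (dirty, valid) from the matching guest TLB entry.
 * Optionally reports the host physical addresses through @hpa0/@hpa1.
 */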
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		   kvm_mips_get_kernel_asid(vcpu) :
		   kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
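
/*
 * Search the guest's software TLB for an entry matching @entryhi.
 * Returns the matching index, or -1 if none is found.
 */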
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
		     ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
		    (TLB_IS_GLOBAL(tlb[i]) ||
		     (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
			index = i;
			break;
		}
	}

#ifdef DEBUG
	/* Only dereference the entry when a match was actually found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index, tlb[index].tlb_lo0,
			  tlb[index].tlb_lo1);
#endif

	return index;
}
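
/*
 * Probe the host TLB for @vaddr under the guest's current ASID.
 * Returns the matching index, or a negative value if none is found.
 */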
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

	return idx;
}
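
/*
 * Invalidate any host TLB entry matching @va under the guest's user
 * ASID by overwriting it with a unique, invalid EntryHi value.
 */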
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Index 0 is a valid entry, so invalidate on any non-negative hit */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) |
			  (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK),
			  idx);
#endif

	return 0;
}

/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
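/* Invalidate the host TLB entry at @index. */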
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
	return 0;
}
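
/*
 * Invalidate the entire host TLB, optionally skipping entries that map
 * guest KSEG0 so guest kernel mappings survive the flush.
 */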
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
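
/*
 * Allocate a fresh ASID for @mm on @cpu, starting a new ASID cycle
 * (and flushing the TLB) when the ASID space wraps.
 */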
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
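
/* Invalidate every entry in this CPU's TLB. */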
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

#ifdef DEBUG
	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

	/* Allocate new kernel and user ASIDs if needed */
	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, reload the
		 * pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 ASID_MASK);
			ehb();
		}
	} else {
		/*
		 * New ASIDs were allocated for the VM. Were we in guest
		 * context? If so, the pre-empted ASID is no longer valid; set
		 * it to what it should be based on the mode of the guest
		 * (kernel/user).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if ((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	    ASID_VERSION_MASK) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
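
/*
 * Fetch the instruction at guest PC @opc, faulting in a host TLB
 * mapping first if required. Returns KVM_INVALID_INST on failure.
 */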
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			index = kvm_mips_guest_tlb_lookup(vcpu,
					((unsigned long) opc & VPN2_MASK) |
					(kvm_read_c0_guest_entryhi(cop0) &
					 ASID_MASK));
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
					&vcpu->arch.guest_tlb[index],
					NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
						(unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}

EXPORT_SYMBOL(kvm_local_flush_tlb_all);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);
EXPORT_SYMBOL(kvm_arch_vcpu_put);