/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}

/* Dump the contents of the host TLB, for debugging. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the guest TLB array when a match was found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d (no match)\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}

int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	int idx_user, idx_kernel;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						 << MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx >= 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
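	/*
	 * Worked example of the interpretation below: with 4K pages the guest
	 * PageMask is 0, so pagemask above is ~0x1fffl (the VPN2 bits),
	 * pagemaskbit comes out as 0x1000, and bit 12 of the GVA picks
	 * EntryLo0 vs EntryLo1.  The EntryLo PFN field starts at bit 6 and
	 * holds the physical address >> 12, so (pa << 6) & ~0xfff recovers
	 * the page frame address, and the GVA bits below pagemaskbit supply
	 * the offset within the page.
	 */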
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
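
/*
 * Usage sketch (illustrative only; the handler and variable names below are
 * assumptions, not definitions from this file): an exit handler that has a
 * faulting GVA in a TLB mapped guest segment can translate it before
 * resolving the fault against the GPA mappings, e.g.
 *
 *	unsigned long gpa;
 *
 *	if (kvm_vz_guest_tlb_lookup(vcpu, badvaddr, &gpa))
 *		return deliver_guest_tlb_refill(vcpu);
 *	handle_gpa_fault(vcpu, gpa, is_store);
 */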

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);

/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root write of guest TLB entries */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
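
/*
 * Illustrative pairing (a sketch; the wired_tlb fields named below are an
 * assumption about the VZ implementation, not defined in this file): a VZ
 * host can preserve a guest's wired TLB entries across scheduling by saving
 * them when the vcpu is put and reloading them when it is loaded again, with
 * interrupts disabled around both calls:
 *
 *	kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, vcpu->arch.wired_tlb_used);
 *	...
 *	kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, vcpu->arch.wired_tlb_used);
 */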

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
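
/*
 * Rough usage sketch (assumptions about the caller, not part of this file):
 * a guest entry path is expected to bracket time spent in the guest address
 * space with these helpers, on the same CPU and without an intervening
 * context switch, e.g.
 *
 *	cpu = smp_processor_id();
 *	kvm_mips_suspend_mm(cpu);	(point active_mm at init_mm)
 *	... switch to guest ASIDs and run the guest ...
 *	kvm_mips_resume_mm(cpu);	(restore current->mm as active_mm)
 */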