tlb.c 5.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
  7. * TLB handlers run from KSEG0
  8. *
  9. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  10. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/smp.h>
  14. #include <linux/mm.h>
  15. #include <linux/delay.h>
  16. #include <linux/export.h>
  17. #include <linux/kvm_host.h>
  18. #include <linux/srcu.h>
  19. #include <asm/cpu.h>
  20. #include <asm/bootinfo.h>
  21. #include <asm/mmu_context.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/tlb.h>
  25. #include <asm/tlbdebug.h>
  26. #undef CONFIG_MIPS_MT
  27. #include <asm/r4kcache.h>
  28. #define CONFIG_MIPS_MT
  29. #define KVM_GUEST_PC_TLB 0
  30. #define KVM_GUEST_SP_TLB 1
  31. static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
  32. {
  33. struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
  34. int cpu = smp_processor_id();
  35. return cpu_asid(cpu, kern_mm);
  36. }
  37. static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
  38. {
  39. struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
  40. int cpu = smp_processor_id();
  41. return cpu_asid(cpu, user_mm);
  42. }
/* Dump the host hardware TLB registers and entries to the kernel log. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	/* Disable interrupts so the dump is a consistent snapshot. */
	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
/*
 * Dump the guest's software TLB (vcpu->arch.guest_tlb) to the kernel log,
 * one line pair per entry: EntryHi, then both EntryLo halves with their
 * Dirty/Global flags, cache attribute, and the page mask.
 */
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		/* '*' flags an entry where neither EntryLo half is valid. */
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
			 ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
  82. int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
  83. {
  84. int i;
  85. int index = -1;
  86. struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
  87. for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
  88. if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
  89. TLB_HI_ASID_HIT(tlb[i], entryhi)) {
  90. index = i;
  91. break;
  92. }
  93. }
  94. kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
  95. __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
  96. return index;
  97. }
  98. EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
/*
 * _kvm_mips_host_tlb_inv() - Probe the host TLB for @entryhi and invalidate
 * any matching entry.
 *
 * Must be called with interrupts disabled; clobbers the CP0 EntryHi,
 * EntryLo0/1 and Index registers, so the caller is responsible for saving
 * and restoring EntryHi around the call.
 *
 * Returns:	the probed index (>= 0 if an entry was found and invalidated,
 *		negative if there was no match).
 */
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	/* Probe the TLB for an entry matching @entryhi. */
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* An index beyond the TLB size indicates serious breakage. */
	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		/*
		 * Overwrite the matching entry with a unique (unmatchable)
		 * EntryHi and zeroed (invalid) EntryLo halves.
		 */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}
/*
 * kvm_mips_host_tlb_inv() - Invalidate host TLB mappings of a guest virtual
 * address.
 * @vcpu:	VCPU whose ASIDs to use.
 * @va:		Guest virtual address to invalidate.
 * @user:	Whether to invalidate the guest user mode mapping.
 * @kernel:	Whether to invalidate the guest kernel mode mapping.
 *
 * Probe the host TLB under the VCPU's user and/or kernel ASIDs and
 * invalidate any matching entries, preserving the caller's EntryHi.
 *
 * Returns:	0 (unconditionally).
 */
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	/*
	 * NOTE: idx_user/idx_kernel may be left uninitialized, but each is
	 * only read behind the same user/kernel flag that guards its write,
	 * so the short-circuit below never evaluates an indeterminate value.
	 */
	int idx_user, idx_kernel;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	/* Save EntryHi; the probe helper below clobbers it. */
	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						    kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	/* Drop this CPU from the mm's cpumask and park active_mm on init_mm. */
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	/* Re-add this CPU to the mm's cpumask and restore active_mm. */
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);