tlb.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

#define CSKY_TLB_SIZE CONFIG_CPU_TLB_SIZE

void flush_tlb_all(void)
{
        tlb_invalid_all();
}
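
/*
 * Drop the mm's ASID so that stale entries can never match again, then
 * invalidate the whole TLB as well.
 */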
void flush_tlb_mm(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0)
                drop_mmu_context(mm, cpu);

        tlb_invalid_all();
}
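
/*
 * Rewriting entryhi with the currently-live ASID does not appear to
 * invalidate the micro-TLB (utlb), so switch to a different ASID first
 * and then restore the old value.
 */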
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
        if ((oldpid & ASID_MASK) == newpid) \
                write_mmu_entryhi(oldpid + 1); \
        write_mmu_entryhi(oldpid); \
} while (0)
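
/*
 * Each jTLB entry maps an even/odd pair of pages via entrylo0/entrylo1,
 * so ranges are aligned to double-page boundaries, stepped by
 * (PAGE_SIZE << 1), and the size check halves the page count before
 * comparing it against half the TLB.
 */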
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;
                int newpid = cpu_asid(cpu, mm);

                local_irq_save(flags);
                size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                size = (size + 1) >> 1;
                if (size <= CSKY_TLB_SIZE / 2) {
                        start &= (PAGE_MASK << 1);
                        end += ((PAGE_SIZE << 1) - 1);
                        end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
                        while (start < end) {
                                asm volatile("tlbi.vaas %0"
                                             ::"r"(start | newpid));
                                start += (PAGE_SIZE << 1);
                        }
                        sync_is();
#else
                        {
                                int oldpid = read_mmu_entryhi();

                                while (start < end) {
                                        int idx;

                                        write_mmu_entryhi(start | newpid);
                                        start += (PAGE_SIZE << 1);
                                        tlb_probe();
                                        idx = read_mmu_index();
                                        if (idx >= 0)
                                                tlb_invalid_indexed();
                                }
                                restore_asid_inv_utlb(oldpid, newpid);
                        }
#endif
                } else {
                        drop_mmu_context(mm, cpu);
                }
                local_irq_restore(flags);
        }
}
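
/*
 * Kernel mappings have no per-mm context, so the probe runs with a zero
 * ASID in entryhi; ranges larger than the TLB fall back to a full
 * invalidate.
 */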
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (size <= CSKY_TLB_SIZE) {
                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);
#ifdef CONFIG_CPU_HAS_TLBI
                while (start < end) {
                        asm volatile("tlbi.vaas %0"::"r"(start));
                        start += (PAGE_SIZE << 1);
                }
                sync_is();
#else
                {
                        int oldpid = read_mmu_entryhi();

                        while (start < end) {
                                int idx;

                                write_mmu_entryhi(start);
                                start += (PAGE_SIZE << 1);
                                tlb_probe();
                                idx = read_mmu_index();
                                if (idx >= 0)
                                        tlb_invalid_indexed();
                        }
                        restore_asid_inv_utlb(oldpid, 0);
                }
#endif
        } else {
                flush_tlb_all();
        }
        local_irq_restore(flags);
}
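
/*
 * Single-page flush: probe for the (page | ASID) tag and knock out the
 * matching entry, if any.
 */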
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();
        int newpid;

        /* a NULL vma has no mm to take an ASID from, so bail out early */
        if (!vma || cpu_context(cpu, vma->vm_mm) == 0)
                return;

        newpid = cpu_asid(cpu, vma->vm_mm);
        page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
        asm volatile("tlbi.vaas %0"::"r"(page | newpid));
        sync_is();
#else
        {
                int oldpid, idx;
                unsigned long flags;

                local_irq_save(flags);
                oldpid = read_mmu_entryhi();
                write_mmu_entryhi(page | newpid);
                tlb_probe();
                idx = read_mmu_index();
                if (idx >= 0)
                        tlb_invalid_indexed();
                restore_asid_inv_utlb(oldpid, newpid);
                local_irq_restore(flags);
        }
#endif
}

/*
 * Remove one kernel space TLB entry. This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void flush_tlb_one(unsigned long page)
{
        int oldpid;

        oldpid = read_mmu_entryhi();
        page &= (PAGE_MASK << 1);

#ifdef CONFIG_CPU_HAS_TLBI
        /*
         * Keep the current ASID bits in the address; the entry is
         * global, so the ASID value itself should not affect the match.
         */
        page = page | (oldpid & 0xfff);
        asm volatile("tlbi.vaas %0"::"r"(page));
        sync_is();
#else
        {
                int idx;
                unsigned long flags;

                page = page | (oldpid & 0xff);

                local_irq_save(flags);
                write_mmu_entryhi(page);
                tlb_probe();
                idx = read_mmu_index();
                if (idx >= 0)
                        tlb_invalid_indexed();
                restore_asid_inv_utlb(oldpid, oldpid);
                local_irq_restore(flags);
        }
#endif
}
EXPORT_SYMBOL(flush_tlb_one);

/* dump all CSKY_TLB_SIZE jTLB entries */
void show_jtlb_table(void)
{
        unsigned long flags;
        int entryhi, entrylo0, entrylo1;
        int entry;
        int oldpid;

        local_irq_save(flags);
        entry = 0;
        pr_info("\n\n\n");

        oldpid = read_mmu_entryhi();
        while (entry < CSKY_TLB_SIZE) {
                write_mmu_index(entry);
                tlb_read();

                entryhi  = read_mmu_entryhi();
                entrylo0 = read_mmu_entrylo0();
                entrylo1 = read_mmu_entrylo1();

                pr_info("jtlb[%d]: entryhi - 0x%x; entrylo0 - 0x%x; entrylo1 - 0x%x\n",
                        entry, entryhi, entrylo0, entrylo1);
                entry++;
        }
        write_mmu_entryhi(oldpid);
        local_irq_restore(flags);
}