tlbflush.h

/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)       asm ("tlbi " #op "\n"                         \
                   ALTERNATIVE("nop\n nop",                                   \
                               "dsb ish\n tlbi " #op,                         \
                               ARM64_WORKAROUND_REPEAT_TLBI,                  \
                               CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)           \
                            : : )

#define __TLBI_1(op, arg)       asm ("tlbi " #op ", %0\n"                     \
                   ALTERNATIVE("nop\n nop",                                   \
                               "dsb ish\n tlbi " #op ", %0",                  \
                               ARM64_WORKAROUND_REPEAT_TLBI,                  \
                               CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)           \
                            : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)         __TLBI_N(op, ##__VA_ARGS__, 1, 0)
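/*
 * Illustrative expansion (not part of the original header): __tlbi() counts
 * its optional argument by appending ", 1, 0" to the argument list, so
 *
 *      __tlbi(vmalle1is)       -> __TLBI_N(vmalle1is, 1, 0)
 *                              -> __TLBI_0(vmalle1is, 1)
 *                              -> asm ("tlbi vmalle1is\n" ...)
 *
 *      __tlbi(vale1is, addr)   -> __TLBI_N(vale1is, addr, 1, 0)
 *                              -> __TLBI_1(vale1is, addr)
 *                              -> asm ("tlbi vale1is, %0\n" ... : : "r" (addr))
 *
 * The ALTERNATIVE() in each expansion patches in a "dsb ish" plus a repeated
 * TLBI when the ARM64_WORKAROUND_REPEAT_TLBI erratum workaround is in effect.
 */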
#define __tlbi_user(op, arg) do {                                             \
        if (arm64_kernel_unmapped_at_el0())                                   \
                __tlbi(op, (arg) | USER_ASID_FLAG);                           \
} while (0)
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)                                \
        ({                                                      \
                unsigned long __ta = (addr) >> 12;              \
                __ta &= GENMASK_ULL(43, 0);                     \
                __ta |= (unsigned long)(asid) << 48;            \
                __ta;                                           \
        })
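/*
 * Worked example (illustrative, not from the original header): the operand
 * packs the page number of the VA into bits [43:0] and the ASID into bits
 * [63:48], matching the VA/ASID layout expected by the TLBI instructions.
 * For a hypothetical address and ASID:
 *
 *      __TLBI_VADDR(0x00007f1234567000UL, 0x42)
 *              = (0x00007f1234567000UL >> 12) | (0x42UL << 48)
 *              = 0x00420007f1234567
 */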
/*
 *      TLB Invalidation
 *      ================
 *
 *      This header file implements the low-level TLB invalidation routines
 *      (sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *      Every invalidation operation uses the following template:
 *
 *      DSB ISHST       // Ensure prior page-table updates have completed
 *      TLBI ...        // Invalidate the TLB
 *      DSB ISH         // Ensure the TLB invalidation has completed
 *      if (invalidated kernel mappings)
 *              ISB     // Discard any instructions fetched from the old mapping
 *
 *
 *      The following functions form part of the "core" TLB invalidation API,
 *      as documented in Documentation/core-api/cachetlb.rst:
 *
 *      flush_tlb_all()
 *              Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *      flush_tlb_mm(mm)
 *              Invalidate an entire user address space on all CPUs.
 *              The 'mm' argument identifies the ASID to invalidate.
 *
 *      flush_tlb_range(vma, start, end)
 *              Invalidate the virtual-address range '[start, end)' on all
 *              CPUs for the user address space corresponding to 'vma->mm'.
 *              Note that this operation also invalidates any walk-cache
 *              entries associated with translations for the specified address
 *              range.
 *
 *      flush_tlb_kernel_range(start, end)
 *              Same as flush_tlb_range(..., start, end), but applies to
 *              kernel mappings rather than a particular user address space.
 *              Whilst not explicitly documented, this function is used when
 *              unmapping pages from vmalloc/io space.
 *
 *      flush_tlb_page(vma, addr)
 *              Invalidate a single user mapping for address 'addr' in the
 *              address space corresponding to 'vma->mm'.  Note that this
 *              operation only invalidates a single, last-level page-table
 *              entry and therefore does not affect any walk-caches.
 *
 *
 *      Next, we have some undocumented invalidation routines that you probably
 *      don't want to call unless you know what you're doing:
 *
 *      local_flush_tlb_all()
 *              Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *      __flush_tlb_kernel_pgtable(addr)
 *              Invalidate a single kernel mapping for address 'addr' on all
 *              CPUs, ensuring that any walk-cache entries associated with the
 *              translation are also invalidated.
 *
 *      __flush_tlb_range(vma, start, end, stride, last_level)
 *              Invalidate the virtual-address range '[start, end)' on all
 *              CPUs for the user address space corresponding to 'vma->mm'.
 *              The invalidation operations are issued at a granularity
 *              determined by 'stride' and only affect any walk-cache entries
 *              if 'last_level' is false.
 *
 *
 *      Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *      on top of these routines, since that is our interface to the mmu_gather
 *      API as used by munmap() and friends.
 */
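/*
 * Usage sketch (illustrative, not part of the original header): typical call
 * sites look roughly like the following; the surrounding code is hypothetical
 * and only the choice of routine is the point.
 *
 *      set_pte_at(mm, addr, ptep, pte);        // changed a single user PTE
 *      flush_tlb_page(vma, addr);
 *
 *      // unmapped or re-protected a whole user range
 *      flush_tlb_range(vma, start, end);
 *
 *      // tore down a vmalloc/io mapping in the kernel address space
 *      flush_tlb_kernel_range(start, end);
 */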
static inline void local_flush_tlb_all(void)
{
        dsb(nshst);
        __tlbi(vmalle1);
        dsb(nsh);
        isb();
}

static inline void flush_tlb_all(void)
{
        dsb(ishst);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long asid = __TLBI_VADDR(0, ASID(mm));

        dsb(ishst);
        __tlbi(aside1is, asid);
        __tlbi_user(aside1is, asid);
        dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr)
{
        unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

        dsb(ishst);
        __tlbi(vale1is, addr);
        __tlbi_user(vale1is, addr);
        dsb(ish);
}
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS    1024UL
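/*
 * Illustrative arithmetic (not part of the original header): with 4KiB base
 * pages and the default PAGE_SIZE stride, the MAX_TLBI_OPS cap works out to
 * 1024 * 4KiB = 4MiB; ranges larger than that fall back to a full-ASID or
 * full-TLB invalidation below. With a 2MiB (PMD_SIZE) stride the same cap
 * covers 1024 * 2MiB = 2GiB. The concrete figures assume 4KiB base pages.
 */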
static inline void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     unsigned long stride, bool last_level)
{
        unsigned long asid = ASID(vma->vm_mm);
        unsigned long addr;

        if ((end - start) > (MAX_TLBI_OPS * stride)) {
                flush_tlb_mm(vma->vm_mm);
                return;
        }

        /* Convert the stride into units of 4k */
        stride >>= 12;

        start = __TLBI_VADDR(start, asid);
        end = __TLBI_VADDR(end, asid);

        dsb(ishst);
        for (addr = start; addr < end; addr += stride) {
                if (last_level) {
                        __tlbi(vale1is, addr);
                        __tlbi_user(vale1is, addr);
                } else {
                        __tlbi(vae1is, addr);
                        __tlbi_user(vae1is, addr);
                }
        }
        dsb(ish);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        /*
         * We cannot use leaf-only invalidation here, since we may be
         * invalidating table entries as part of collapsing hugepages or
         * moving page tables.
         */
        __flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}
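/*
 * Illustrative example (not from the original header): flush_tlb_range()
 * above is the conservative caller of __flush_tlb_range(), using a PAGE_SIZE
 * stride and last_level == false so that walk-cache entries are dropped as
 * well. A hypothetical caller that knows it only modified last-level entries
 * at PMD granularity (2MiB with 4KiB base pages) could instead issue:
 *
 *      __flush_tlb_range(vma, start, end, PMD_SIZE, true);
 *
 * which emits one TLBI VALE1IS per 2MiB block and leaves walk-cache entries
 * intact. Whether such a caller exists is outside this header; the line above
 * is only a usage sketch.
 */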
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long addr;

        if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
                flush_tlb_all();
                return;
        }

        start = __TLBI_VADDR(start, 0);
        end = __TLBI_VADDR(end, 0);

        dsb(ishst);
        /* The VA operand is in 4K units, so advance by one page's worth */
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                __tlbi(vaale1is, addr);
        dsb(ish);
        isb();
}
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
        unsigned long addr = __TLBI_VADDR(kaddr, 0);

        dsb(ishst);
        __tlbi(vaae1is, addr);
        dsb(ish);
}
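/*
 * Illustrative example (not part of the original header): a hypothetical
 * caller that has just unhooked and freed a kernel page-table page (for
 * instance, replacing a next-level table at address 'kaddr' with a block
 * mapping) would follow the page-table update with:
 *
 *      __flush_tlb_kernel_pgtable(kaddr);
 *
 * so that any walk-cache entry still describing the old table is dropped on
 * all CPUs. The scenario is a sketch; the actual call sites live elsewhere in
 * the arch code.
 */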
#endif  /* !__ASSEMBLY__ */

#endif  /* __ASM_TLBFLUSH_H */