/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *        David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *        (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *            (this is a no-op on ia64).
 *        (2) Clear the relevant portions of the page-table.
 *        (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *        (4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *        tlb <- tlb_gather_mmu(mm, start, end);        // start unmap for address space MM
 *        {
 *          for each vma that needs a shootdown do {
 *            tlb_start_vma(tlb, vma);
 *            for each page-table-entry PTE that needs to be removed do {
 *              tlb_remove_tlb_entry(tlb, pte, address);
 *              if (pte refers to a normal page) {
 *                tlb_remove_page(tlb, page);
 *              }
 *            }
 *            tlb_end_vma(tlb, vma);
 *          }
 *        }
 *        tlb_finish_mmu(tlb, start, end);        // finish unmap for address space MM
 */
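/*
 * Illustrative sketch only (never compiled): the template above written out
 * as C.  The vma/pte iteration is elided, and example_shootdown() is a
 * hypothetical stand-in for the generic mm code that really drives these
 * hooks; it is not part of this interface.
 */
#if 0
static void example_shootdown(struct mm_struct *mm,
                              unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;
        struct vm_area_struct *vma;

        tlb_gather_mmu(&tlb, mm, start, end);        /* start unmap for MM */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                tlb_start_vma(&tlb, vma);
                /* ... for each PTE to be removed, with its page: ... */
                tlb_remove_tlb_entry(&tlb, pte, address);
                if (pte_present(*pte))
                        tlb_remove_page(&tlb, pte_page(*pte));
                tlb_end_vma(&tlb, vma);
        }
        tlb_finish_mmu(&tlb, start, end);        /* finish unmap for MM */
}
#endif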
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE        8
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;            /* # of pages gathered so far */
        unsigned int            max;           /* capacity of pages[] */
        unsigned char           fullmm;        /* non-zero means full mm flush */
        unsigned char           need_flush;    /* really unmapped some PTEs? */
        unsigned long           start, end;    /* range passed to arch_tlb_gather_mmu() */
        unsigned long           start_addr;    /* range of entries actually removed, */
        unsigned long           end_addr;      /*   tracked by __tlb_remove_tlb_entry() */
        struct page             **pages;       /* either local[] or a full page of pointers */
        struct page             *local[IA64_GATHER_BUNDLE];
};
struct ia64_tr_entry {
        u64 ifa;
        u64 itir;
        u64 pte;
        u64 rr;
}; /* record of an inserted translation-register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
 * Region register macros
 */
#define RR_TO_VE(val)        (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)           (((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK           0x0000000000000001L
#define RR_VE_SHIFT          0
#define RR_TO_PS(val)        (((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)           (((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK           0x00000000000000fcL
#define RR_PS_SHIFT          2
#define RR_RID_MASK          0x00000000ffffff00L
#define RR_TO_RID(val)       (((val) >> 8) & 0xffffff)
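/*
 * Example (illustrative only, not part of the original interface): decoding
 * a region-register value with the accessors above.  The bit positions are
 * the ones encoded by the macros; the value itself is arbitrary.
 */
static inline void example_decode_rr(u64 rr)
{
        u64 ve  = RR_TO_VE(rr);        /* bit 0: VHPT walker enable */
        u64 ps  = RR_TO_PS(rr);        /* bits 2..7: preferred page size (log2 bytes) */
        u64 rid = RR_TO_RID(rr);       /* bits 8..31: region ID */

        (void) ve; (void) ps; (void) rid;
}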
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb->need_flush = 0;

        if (tlb->fullmm) {
                /*
                 * Tearing down the entire address space.  This happens both as a result
                 * of exit() and execve().  The latter case necessitates the call to
                 * flush_tlb_mm() here.
                 */
                flush_tlb_mm(tlb->mm);
        } else if (unlikely(end - start >= 1024*1024*1024*1024UL
                            || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
                /*
                 * If we flush more than a terabyte or across regions, we're probably
                 * better off just flushing the entire TLB(s).  This should be very rare
                 * and is not worth optimizing for.
                 */
                flush_tlb_all();
        } else {
                /*
                 * flush_tlb_range() takes a vma instead of a mm pointer because
                 * some architectures want the vm_flags for ITLB/DTLB flush.
                 */
                struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);

                /* flush the address range from the tlb: */
                flush_tlb_range(&vma, start, end);
                /* now flush the virtual page-table area mapping the address range: */
                flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
        }
}
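/*
 * Illustrative sketch only (never compiled): which branch above a flush
 * takes.  REGION_NUMBER() yields the top three bits of an ia64 virtual
 * address, so any range crossing a 2^61-byte region boundary falls into
 * the flush_tlb_all() case; 'a' is an arbitrary in-region address.
 */
#if 0
        /* tlb->fullmm set (exit/execve): flush_tlb_mm() */
        ia64_tlb_flush_mmu_tlbonly(tlb, 0, ~0UL);
        /* >= 1TB within one region: flush_tlb_all() */
        ia64_tlb_flush_mmu_tlbonly(tlb, a, a + (1UL << 40));
        /* small range within one region (the common case): flush_tlb_range() */
        ia64_tlb_flush_mmu_tlbonly(tlb, a, a + PAGE_SIZE);
#endif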
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        unsigned long i;
        unsigned int nr;

        /* lastly, release the freed pages */
        nr = tlb->nr;

        tlb->nr = 0;
        tlb->start_addr = ~0UL;
        for (i = 0; i < nr; ++i)
                free_page_and_swap_cache(tlb->pages[i]);
}
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        if (!tlb->need_flush)
                return;
        ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
        ia64_tlb_flush_mmu_free(tlb);
}
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

        /*
         * Upgrade from the on-stack bundle to a full page of page pointers;
         * on allocation failure we simply keep using tlb->local.
         */
        if (addr) {
                tlb->pages = (void *)addr;
                tlb->max = PAGE_SIZE / sizeof(void *);
        }
}
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                    unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
        /* start == 0 && end == ~0UL means the entire address space is going away */
        tlb->fullmm = !(start | (end + 1));
        tlb->start = start;
        tlb->end = end;
        tlb->start_addr = ~0UL;
}
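/*
 * Illustrative sketch only (never compiled): the fullmm encoding.  A full
 * address-space teardown is signalled by start == 0 and end == ~0UL, the
 * only combination for which !(start | (end + 1)) is non-zero.
 */
#if 0
        arch_tlb_gather_mmu(&tlb, mm, 0, ~0UL);           /* tlb.fullmm == 1 */
        arch_tlb_gather_mmu(&tlb, mm, addr, addr + len);  /* tlb.fullmm == 0 */
#endif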
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
                    unsigned long start, unsigned long end, bool force)
{
        if (force)
                tlb->need_flush = 1;
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
         */
        ia64_tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        if (tlb->pages != tlb->local)
                free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;

        if (!tlb->nr && tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);

        tlb->pages[tlb->nr++] = page;
        VM_WARN_ON(tlb->nr > tlb->max);
        if (tlb->nr == tlb->max)
                return true; /* batch is full; caller must flush */
        return false;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        tlb_remove_page(tlb, page);
}
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start_addr == ~0UL)
                tlb->start_addr = address;
        /* entries are expected in ascending address order, so only the end grows */
        tlb->end_addr = address + PAGE_SIZE;
}
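/*
 * Illustrative sketch only (never compiled): because entries arrive in
 * ascending address order, successive calls just extend the gathered range.
 * After the three calls below it is [addr, addr + 3*PAGE_SIZE).
 */
#if 0
        __tlb_remove_tlb_entry(tlb, pte0, addr);
        __tlb_remove_tlb_entry(tlb, pte1, addr + PAGE_SIZE);
        __tlb_remove_tlb_entry(tlb, pte2, addr + 2*PAGE_SIZE);
#endif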
#define tlb_migrate_finish(mm)        platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)       do { } while (0)
#define tlb_end_vma(tlb, vma)         do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)           \
do {                                                    \
        tlb->need_flush = 1;                            \
        __tlb_remove_tlb_entry(tlb, ptep, addr);        \
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)        \
        tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
{
}
#define pte_free_tlb(tlb, ptep, address)        \
do {                                            \
        tlb->need_flush = 1;                    \
        __pte_free_tlb(tlb, ptep, address);     \
} while (0)

#define pmd_free_tlb(tlb, pmdp, address)        \
do {                                            \
        tlb->need_flush = 1;                    \
        __pmd_free_tlb(tlb, pmdp, address);     \
} while (0)

#define pud_free_tlb(tlb, pudp, address)        \
do {                                            \
        tlb->need_flush = 1;                    \
        __pud_free_tlb(tlb, pudp, address);     \
} while (0)
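/*
 * Illustrative sketch only (never compiled): freeing a page-table page
 * during teardown.  Each wrapper sets need_flush first, so the page cannot
 * be recycled before the TLB flush in step (3) of the procedure described
 * at the top of this file.
 */
#if 0
        pte_free_tlb(tlb, ptep, address);        /* need_flush = 1, then __pte_free_tlb() */
#endif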
#endif /* _ASM_IA64_TLB_H */