/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)        ((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE        8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            fullmm;         /* flushing the whole mm, not a range */
        struct vm_area_struct   *vma;           /* vma being torn down, if any */
        unsigned long           start, end;
        unsigned long           range_start;    /* accumulated range to flush */
        unsigned long           range_end;
        unsigned int            nr;             /* pages gathered so far */
        unsigned int            max;            /* capacity of pages[] */
        struct page             **pages;        /* local[] or a full page of pointers */
        struct page             *local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
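
/*
 * Illustrative sketch, not part of this header: roughly how a case-1
 * caller drives the gather API.  The page-table walk that produces each
 * pte pointer is elided; lookup_pte() below is a hypothetical stand-in,
 * not a real kernel helper.
 *
 *      struct mmu_gather tlb;
 *      unsigned long addr;
 *
 *      tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
 *      tlb_start_vma(&tlb, vma);                 // resets the flush range
 *      for (addr = start; addr < end; addr += PAGE_SIZE) {
 *              pte_t *ptep = lookup_pte(vma->vm_mm, addr);  // hypothetical
 *              tlb_remove_tlb_entry(&tlb, ptep, addr);      // widens the range
 *      }
 *      tlb_end_vma(&tlb, vma);           // one flush_tlb_range() if needed
 *      tlb_finish_mmu(&tlb, start, end); // drain pages, free gather array
 */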

static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || !tlb->vma)
                flush_tlb_mm(tlb->mm);
        else if (tlb->range_end > 0) {
                flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
        if (!tlb->fullmm) {
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
                if (addr + PAGE_SIZE > tlb->range_end)
                        tlb->range_end = addr + PAGE_SIZE;
        }
}
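
/*
 * Worked example (addresses made up, 4K pages assumed): tlb_start_vma()
 * resets the range to the empty state (range_start = TASK_SIZE,
 * range_end = 0).  A first call with addr = 0x8000 then yields
 * [0x8000, 0x9000); a later call with addr = 0xb000 widens it to
 * [0x8000, 0xc000).  tlb_flush() emits a single flush_tlb_range()
 * over the accumulated span.
 */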

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

        if (addr) {
                tlb->pages = (void *)addr;
                tlb->max = PAGE_SIZE / sizeof(struct page *);
        }
}
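
/*
 * Note: if the GFP_NOWAIT allocation fails, tlb->pages keeps pointing
 * at the embedded local[] bundle (MMU_GATHER_BUNDLE entries), so
 * gathering still works; tlb_remove_page() simply drains via
 * tlb_flush_mmu() more often.
 */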

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        tlb_flush(tlb);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        free_pages_and_swap_cache(tlb->pages, tlb->nr);
        tlb->nr = 0;
        if (tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        /* fullmm only when the whole address space goes: start == 0, end == ~0UL */
        tlb->fullmm = !(start | (end + 1));
        tlb->start = start;
        tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
        __tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        if (tlb->pages != tlb->local)
                free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
                tlb->vma = vma;
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->pages[tlb->nr++] = page;
        VM_BUG_ON(tlb->nr > tlb->max);
        return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (!__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}
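
/*
 * Note on batching: __tlb_remove_page() returns the number of slots
 * still free in tlb->pages[]; tlb_remove_page() treats zero as "batch
 * full" and drains via tlb_flush_mmu(), which flushes the TLB, frees
 * the gathered pages and, if we are still on the local[] bundle,
 * retries the page-sized allocation in __tlb_alloc_page().
 */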

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        unsigned long addr)
{
        pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
#else
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
         */
        addr &= PMD_MASK;
        tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
        tlb_add_flush(tlb, addr + SZ_1M);
#endif

        tlb_remove_page(tlb, pte);
}
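
/*
 * Worked example for the classic-MMU path above: with 4K pages and addr
 * aligned down by PMD_MASK, the two tlb_add_flush() calls push the flush
 * range to at least [addr + 1M - 4K, addr + 1M + 4K), i.e. one page on
 * either side of the boundary between the two 1MB sections, so the
 * eventual flush_tlb_range() covers both hardware pmd entries.
 */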

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
        unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
        tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)        __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)        __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)        pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)                do { } while (0)

#endif /* CONFIG_MMU */
#endif