tlb.h

/*
 * Based on arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#define __tlb_remove_pmd_tlb_entry __tlb_remove_pmd_tlb_entry

#include <asm-generic/tlb.h>

#include <linux/pagemap.h>
#include <linux/swap.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
#else
#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
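
/*
 * Background on the choice above: with CONFIG_HAVE_RCU_TABLE_FREE,
 * pages handed to tlb_remove_table() are only freed after an RCU grace
 * period, so lockless page-table walkers (e.g. fast GUP) cannot see a
 * table page reused while they still hold a pointer into it.
 * __tlb_remove_table() is the callback run at that point; here it just
 * frees the page, dropping any swap-cache reference with it.
 */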

/*
 * There are three ways the TLB shootdown code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     Page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 */
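
/*
 * For orientation, a caller of this machinery looks roughly like the
 * sketch below (this lives in the core mm code, not in this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	... per vma: tlb_start_vma(), unmap ptes, tlb_end_vma() ...
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() ends up invoking tlb_flush() below once the gather
 * completes.
 */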

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end > 0) {
		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
		flush_tlb_range(&vma, tlb->start, tlb->end);
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}
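
/*
 * The on-stack vma above is a shortcut: flush_tlb_range() takes a vma,
 * but the arm64 implementation only needs vma->vm_mm to pick the ASID,
 * so a dummy vma carrying tlb->mm is enough.  Resetting start/end to
 * TASK_SIZE/0 re-arms the min/max tracking in tlb_add_flush() below.
 */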

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		tlb->start = min(tlb->start, addr);
		tlb->end = max(tlb->end, addr + PAGE_SIZE);
	}
}
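
/*
 * The gathered range starts out as the "empty" window [TASK_SIZE, 0),
 * so the first tlb_add_flush() snaps both bounds to the unmapped page
 * and later calls only widen it.  For a full-mm teardown the range is
 * ignored; tlb_flush() uses flush_tlb_mm() instead.
 */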

/*
 * Memorize the range for the TLB flush.
 */
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb,
				 struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_end_vma(struct mmu_gather *tlb,
			       struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
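
/*
 * Taken together: tlb_start_vma() re-arms the window and tlb_end_vma()
 * flushes it, so each vma is flushed over no more than the addresses
 * actually unmapped inside it.
 */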

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, pte);
}
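
/*
 * pgtable_page_dtor() undoes pgtable_page_ctor() (the split page-table
 * lock and accounting) before the pte page is queued for freeing via
 * tlb_remove_entry(), which defers the free through RCU when that is
 * configured above.
 */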

#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pmdp));
}
#endif

#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
				  unsigned long addr)
{
	tlb_add_flush(tlb, addr);
	tlb_remove_entry(tlb, virt_to_page(pudp));
}
#endif
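
/*
 * CONFIG_ARM64_PGTABLE_LEVELS decides which of the helpers above exist:
 * with two translation levels the pmd is folded away (nothing to free),
 * and only a four-level configuration has a real pud.
 */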

static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
					      unsigned long address)
{
	tlb_add_flush(tlb, address);
}

#endif
  117. #endif