tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct is therefore
 * a two-step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
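
/*
 * Illustrative sketch only (the helper names below are made up for this
 * comment and are not defined anywhere): updating an attached pte thus
 * looks roughly like
 *
 *	invalidate_pte(mm, addr, ptep);		i) IPTE/IDTE/CSP, purges the TLB
 *	store_pte(ptep, new_pte);		ii) store the new value
 *
 * rather than a single direct store to *ptep.
 */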

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
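
/*
 * Sizing note (assuming the usual 4KB PAGE_SIZE and a 64-bit layout in
 * which sizeof(struct mmu_table_batch) is 24 bytes): one batch page holds
 * (4096 - 24) / 8 = 509 table pointers. The batch is handed to RCU via its
 * rcu head, so the tables are only returned to the allocator after a grace
 * period; see tlb_remove_table() and tlb_table_flush() below.
 */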

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
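
/*
 * The generic tlb_gather_mmu() code passes start = 0, end = -1 when an
 * entire address space is torn down; !(start | (end + 1)) then evaluates
 * to 1 and marks the gather as a full-mm flush.
 */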
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}
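
/*
 * Flush order matters: the TLB is flushed before the batched page tables
 * are released, so the hardware no longer holds translations built from
 * tables that are about to go away.
 */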
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}
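
/*
 * Rough life cycle as driven by the generic mmu_gather code (sketch only,
 * the callers live in mm/, not in this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);	maps to arch_tlb_gather_mmu()
 *	... clear ptes, free tables with pte_free_tlb()/pmd_free_tlb()/... ...
 *	tlb_finish_mmu(&tlb, start, end);	maps to arch_tlb_finish_mmu()
 */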

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 */
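/*
 * In the generic mmu_gather scheme a "true" return value asks the caller
 * to run tlb_flush_mmu() because the page batch is full; s390 frees the
 * page right away, never batches it, and therefore always returns false.
 */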
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
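/*
 * page_table_free_rcu() puts the pte table on the mmu_gather table batch,
 * so it is only really freed after an RCU grace period (on s390 two 2KB
 * pte tables share one 4KB page; the fragment handling lives in
 * arch/s390/mm/pgalloc.c).
 */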
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb_remove_table(tlb, pud);
}
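
/*
 * The per-vma and per-entry hooks below are no-ops: a pte/pmd is already
 * invalidated (and thereby flushed from the TLB) at the point it is
 * cleared, and the remaining mm-wide flush is done by
 * tlb_flush_mmu_tlbonly() above.
 */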
#define tlb_start_vma(tlb, vma)				do { } while (0)
#define tlb_end_vma(tlb, vma)				do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)		do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)				do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
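
/*
 * Nothing to do when the page size changes either: pages are freed
 * immediately in __tlb_remove_page(), so there is no pending batch that
 * would need to be flushed out first.
 */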
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */