#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}
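
/*
 * Encoding note: 0xb98e is the IDTE opcode, hand-assembled via .insn for
 * the benefit of older binutils. The constant 2048 (bit 52 of the first
 * operand) presumably selects the clearing-by-ASCE option, so the purge
 * applies to every TLB entry formed under the ASCE passed in %1, on all
 * CPUs in the configuration; see the Principles of Operation for the
 * authoritative definition of the operands.
 */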
#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	csp(&dummy, 0, 0);
}
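
/*
 * csp() wraps COMPARE AND SWAP AND PURGE: besides the (here meaningless)
 * compare-and-swap on the dummy word, a successful CSP purges the TLBs
 * of all CPUs in the configuration, which is the only effect wanted here.
 */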
/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is in use,
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
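
/*
 * A note on the bracketing above, assuming the usual s390 conventions:
 * raising flush_count for the duration of the flush lets the context
 * switch path wait for an in-flight flush before re-adding a CPU to
 * mm_cpumask, and the mask is only shrunk back to cpu_attach_mask after
 * a global flush, when stale remote entries are known to be gone. See
 * the s390 mmu_context helpers for the consumer side.
 */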
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, we prefer a per-mm flush on all
	 * CPUs over a local flush, even if the mm only ran on the
	 * local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		__tlb_flush_full(mm);
	}
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
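
/*
 * gmap_asce convention assumed above: 0 means no guest (gmap) ASCE
 * exists and only the mm's own ASCE is flushed; a valid ASCE value
 * means exactly one guest ASCE must be flushed as well; -1UL is a
 * sentinel for "not representable as a fixed set of ASCEs", which
 * forces the __tlb_flush_full() fallback.
 */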
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific mm; on a uniprocessor kernel the
 * local flush covers all CPUs by definition.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_local();
}
#endif
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
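
/*
 * Deferred-flush pattern (hypothetical caller, for illustration only):
 * a page table helper records that a flush is pending instead of
 * flushing right away, and the next lazy flush performs it once:
 *
 *	mm->context.flush_mm = 1;	// mark a flush as pending
 *	...
 *	__tlb_flush_mm_lazy(mm);	// flushes once, clears the mark
 */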
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update, the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
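
/*
 * Sketch of the batched flush during fork (hypothetical, simplified):
 *
 *	// copy_page_range(): write-protect parent PTEs, no flush per page
 *	ptep_set_wrprotect(src_mm, addr, src_pte);
 *	...
 *	// after the whole range is copied, one flush covers the mm
 *	flush_tlb_mm(src_mm);
 *
 * A single mm-wide flush at the end replaces per-page invalidations.
 */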
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
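
/*
 * Usage note: generic code calls flush_tlb_kernel_range() after kernel
 * page table updates (e.g. on the vmalloc/vunmap paths). On s390 the
 * start/end arguments are ignored and the whole kernel ASCE is flushed,
 * since no cheaper range-wise kernel flush is implemented here.
 */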
#endif /* _S390_TLBFLUSH_H */