tlbflush.h

#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}
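/*
 * PTLB ("purge TLB") affects only the TLB of the issuing CPU, so a
 * caller has to keep itself on one CPU for the flush to be meaningful,
 * e.g. (illustrative sketch):
 *
 *	preempt_disable();
 *	__tlb_flush_local();
 *	preempt_enable();
 */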
/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU.
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}
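/*
 * A note on the two helpers above: opcode 0xb98e is IDTE ("invalidate
 * DAT table entry"), emitted via .insn so it assembles even with
 * binutils versions that lack the mnemonic.  The constant 2048 selects
 * IDTE's clearing-by-ASCE operation, i.e. all TLB entries created
 * through the given ASCE are discarded instead of a single table entry
 * being invalidated.  The final field is the M4 field: 0 clears the
 * TLBs of all CPUs, 1 (with the local-TLB-clearing facility) clears
 * only the local one.
 */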
#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}
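/*
 * CSP ("compare and swap and purge") compares r2, the even register of
 * the r2/r3 pair, with the word addressed by r4 and, on equality,
 * stores r3 and purges the TLBs of all CPUs in the configuration.
 * Everything is zero here, so the compare always succeeds and the
 * instruction degenerates into a broadcast TLB flush.
 */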
/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this involves multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
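/*
 * mm->context.attach_count packs two 16-bit counters: the lower half
 * counts users that have the mm attached (tested via count & 0xffff in
 * __tlb_flush_asce() below), the upper half is raised by 0x10000 for
 * the duration of a flush so concurrent page table code can tell that
 * a flush is in progress.
 */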
/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
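/*
 * The local fast path above is taken only when at most the current
 * task has the mm attached ((count & 0xffff) <= active) and the mm's
 * cpumask names no CPU but the current one; otherwise the flush must
 * reach all CPUs via IDTE or, lacking IDTE, a full global flush.
 */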
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(init_mm.context.asce);
	else
		__tlb_flush_local();
}
#endif
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all cpus instead of doing a local flush if the mm
	 * only ran on the local cpu.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, mm->context.asce);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
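/*
 * Deferred-flush pattern (sketch): callers that batch page table
 * updates may set mm->context.flush_mm instead of flushing at once;
 * the next __tlb_flush_mm_lazy() then folds everything into a single
 * flush:
 *
 *	mm->context.flush_mm = 1;
 *	... further page table updates ...
 *	__tlb_flush_mm_lazy(mm);
 */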
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
#define flush_tlb()			do { } while (0)
#define flush_tlb_all()			do { } while (0)
#define flush_tlb_page(vma, addr)	do { } while (0)
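/*
 * flush_tlb(), flush_tlb_all() and flush_tlb_page() can be no-ops
 * because the s390 ptep_* primitives either invalidate the affected
 * entry directly or mark the mm for a deferred flush (see the comment
 * above), leaving nothing for these generic hooks to do.
 */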
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
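/*
 * Usage sketch: generic code, e.g. vmalloc/vmap teardown, calls
 *
 *	flush_tlb_kernel_range(start, end);
 *
 * after changing kernel mappings.  The implementation above ignores
 * the range and simply flushes the entire kernel address space.
 */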
#endif /* _S390_TLBFLUSH_H */