#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}
/*
 * Flush TLB entries for a specific ASCE on all CPUs
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/* Global TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

/*
 * Flush TLB entries for a specific ASCE on the local CPU
 */
static inline void __tlb_flush_idte_local(unsigned long asce)
{
	/* Local TLB flush for the mm */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,1"
		: : "a" (2048), "a" (asce) : "cc");
}
#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	register unsigned long reg2 asm("2");
	register unsigned long reg3 asm("3");
	register unsigned long reg4 asm("4");
	long dummy;

#ifndef CONFIG_64BIT
	if (!MACHINE_HAS_CSP) {
		smp_ptlb_all();
		return;
	}
#endif /* CONFIG_64BIT */
	dummy = 0;
	reg2 = reg3 = 0;
	reg4 = ((unsigned long) &dummy) + 1;
	asm volatile(
		"	csp	%0,%2"
		: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc");
}
/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
	preempt_disable();
	/* Account a pending flush in the upper 16 bits of attach_count */
	atomic_add(0x10000, &mm->context.attach_count);
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		/* Local TLB flush */
		__tlb_flush_local();
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	/*
	 * The low 16 bits of attach_count hold the number of CPUs the mm
	 * is attached to; if no CPU other than (possibly) this one has it
	 * attached and the cpumask contains only this CPU, a local ASCE
	 * flush is sufficient.
	 */
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		__tlb_flush_idte_local(asce);
	} else {
		if (MACHINE_HAS_IDTE)
			__tlb_flush_idte(asce);
		else
			__tlb_flush_global();
		/* Reset TLB flush mask */
		if (MACHINE_HAS_TLB_LC)
			cpumask_copy(mm_cpumask(mm),
				     &mm->context.cpu_attach_mask);
	}
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) init_mm.pgd |
				 init_mm.context.asce_bits);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()
#define __tlb_flush_full(mm)	__tlb_flush_local()

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local(asce);
	else
		__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_TLB_LC)
		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
				       init_mm.context.asce_bits);
	else
		__tlb_flush_local();
}
#endif
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	/*
	 * If the machine has IDTE we prefer to do a per-mm flush
	 * on all CPUs instead of doing a local flush if the mm
	 * only ran on the local CPU.
	 */
	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
				 mm->context.asce_bits);
	else
		__tlb_flush_full(mm);
}

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	if (mm->context.flush_mm) {
		__tlb_flush_mm(mm);
		mm->context.flush_mm = 0;
	}
}
/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush. A sketch of this
 * deferred-flush pattern follows below.
 */
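/*
 * Illustrative sketch only, not part of the original header: how a
 * caller is assumed to combine the deferred PTE update with the final
 * flush described above. The names used (mm, addr, ptep) are
 * hypothetical placeholders. ptep_set_wrprotect() may defer the flush
 * and leave it recorded in mm->context.flush_mm; flush_tlb_mm() then
 * performs a single flush via __tlb_flush_mm_lazy():
 *
 *	ptep_set_wrprotect(mm, addr, ptep);	// TLB flush deferred
 *	...further PTE updates...
 *	flush_tlb_mm(mm);			// one flush, only if flush_mm is set
 */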
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */