/*
 * x86 TLB-flush helpers (arch/x86/include/asm/tlbflush.h).
 */
  1. #ifndef _ASM_X86_TLBFLUSH_H
  2. #define _ASM_X86_TLBFLUSH_H
  3. #include <linux/mm.h>
  4. #include <linux/sched.h>
  5. #include <asm/processor.h>
  6. #include <asm/cpufeature.h>
  7. #include <asm/special_insns.h>
  8. #ifdef CONFIG_PARAVIRT
  9. #include <asm/paravirt.h>
  10. #else
  11. #define __flush_tlb() __native_flush_tlb()
  12. #define __flush_tlb_global() __native_flush_tlb_global()
  13. #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
  14. #endif
/* Per-CPU TLB bookkeeping; one instance per CPU via cpu_tlbstate below. */
struct tlb_state {
#ifdef CONFIG_SMP
	/* mm whose page tables this CPU currently has loaded; reset to
	 * &init_mm by reset_lazy_tlbstate() below. */
	struct mm_struct *active_mm;
	/* Set to TLBSTATE_OK/TLBSTATE_LAZY (defined below); cleared to 0
	 * by reset_lazy_tlbstate(). */
	int state;
#endif

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/* Initialize this CPU's CR4 shadow from the live hardware CR4 value. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
  32. /* Set in this cpu's CR4. */
  33. static inline void cr4_set_bits(unsigned long mask)
  34. {
  35. unsigned long cr4;
  36. cr4 = this_cpu_read(cpu_tlbstate.cr4);
  37. if ((cr4 | mask) != cr4) {
  38. cr4 |= mask;
  39. this_cpu_write(cpu_tlbstate.cr4, cr4);
  40. __write_cr4(cr4);
  41. }
  42. }
  43. /* Clear in this cpu's CR4. */
  44. static inline void cr4_clear_bits(unsigned long mask)
  45. {
  46. unsigned long cr4;
  47. cr4 = this_cpu_read(cpu_tlbstate.cr4);
  48. if ((cr4 & ~mask) != cr4) {
  49. cr4 &= ~mask;
  50. this_cpu_write(cpu_tlbstate.cr4, cr4);
  51. __write_cr4(cr4);
  52. }
  53. }
/* Read this CPU's cached (shadow) copy of CR4. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
  59. /*
  60. * Save some of cr4 feature set we're using (e.g. Pentium 4MB
  61. * enable and PPro Global page enable), so that any CPU's that boot
  62. * up after us can get the correct flags. This should only be used
  63. * during boot on the boot cpu.
  64. */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

/*
 * Set CR4 bits on the boot CPU and record them in mmu_cr4_features
 * (and in the trampoline copy, once trampoline_cr4_features is
 * non-NULL) so CPUs that boot later pick up the same flags.  Per the
 * comment above, boot-cpu/boot-time use only.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}
/*
 * Flush this CPU's non-global TLB entries by reloading CR3.
 *
 * NOTE(review): the CR3 read and write are not preemption-protected
 * here; if callers can run preemptibly, migration between the two
 * operations would reload CR3 on a different CPU — confirm all call
 * sites run with preemption disabled.
 */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}
/*
 * Flush the entire TLB, including global pages, by toggling CR4.PGE.
 * Must be called with interrupts disabled (see the CR4-shadow locking
 * rule on struct tlb_state): the shadow read and the two hardware
 * writes must not be interleaved with an interrupt.
 */
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}
/*
 * Flush everything, including global pages; safe to call with
 * interrupts enabled — they are disabled around the CR4 toggle.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);
	__native_flush_tlb_global_irq_disabled();
	raw_local_irq_restore(flags);
}
/* Invalidate the TLB entry for the page containing @addr via INVLPG. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
  103. static inline void __flush_tlb_all(void)
  104. {
  105. if (cpu_has_pge)
  106. __flush_tlb_global();
  107. else
  108. __flush_tlb();
  109. }
/* Flush one address and account the event in the vm statistics. */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}
  115. #define TLB_FLUSH_ALL -1UL
  116. /*
  117. * TLB flushing:
  118. *
  119. * - flush_tlb() flushes the current mm struct TLBs
  120. * - flush_tlb_all() flushes all processes TLBs
  121. * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  122. * - flush_tlb_page(vma, vmaddr) flushes one page
  123. * - flush_tlb_range(vma, start, end) flushes a range of pages
  124. * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
  125. * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
  126. *
  127. * ..but the i386 has somewhat limited tlb flushing capabilities,
  128. * and page-granular flushes are available only on i486 and up.
  129. */
  130. #ifndef CONFIG_SMP
/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions. *Not* intended to be called
 * directly. All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}
/* Flush every TLB entry on this (the only) CPU, counting the event. */
static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}
/* Flush the current mm's TLB entries (full flush on UP). */
static inline void flush_tlb(void)
{
	__flush_tlb_up();
}
/* Flush this CPU's TLB; identical to flush_tlb() on UP. */
static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}
  155. static inline void flush_tlb_mm(struct mm_struct *mm)
  156. {
  157. if (mm == current->active_mm)
  158. __flush_tlb_up();
  159. }
  160. static inline void flush_tlb_page(struct vm_area_struct *vma,
  161. unsigned long addr)
  162. {
  163. if (vma->vm_mm == current->active_mm)
  164. __flush_tlb_one(addr);
  165. }
  166. static inline void flush_tlb_range(struct vm_area_struct *vma,
  167. unsigned long start, unsigned long end)
  168. {
  169. if (vma->vm_mm == current->active_mm)
  170. __flush_tlb_up();
  171. }
  172. static inline void flush_tlb_mm_range(struct mm_struct *mm,
  173. unsigned long start, unsigned long end, unsigned long vmflag)
  174. {
  175. if (mm == current->active_mm)
  176. __flush_tlb_up();
  177. }
/* There are no other CPUs on UP, so a cross-CPU flush is a no-op. */
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}
/* No lazy-TLB state exists on UP (see struct tlb_state); nothing to do. */
static inline void reset_lazy_tlbstate(void)
{
}
/* Kernel-range flush on UP just flushes everything; bounds are unused. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
  192. #else /* SMP */
  193. #include <asm/smp.h>
  194. #define local_flush_tlb() __flush_tlb()
  195. #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
  196. #define flush_tlb_range(vma, start, end) \
  197. flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
/* SMP implementations live in arch/x86 C code (not visible here). */
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define flush_tlb() flush_tlb_current_task()

void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm,
			     unsigned long start, unsigned long end);

/* Values for cpu_tlbstate.state (0 after reset_lazy_tlbstate()). */
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
/*
 * Reset this CPU's lazy-TLB bookkeeping: clear cpu_tlbstate.state and
 * point active_mm back at init_mm.
 */
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
  215. #endif /* SMP */
  216. /* Not inlined due to inc_irq_stat not being defined yet */
  217. #define flush_tlb_local() { \
  218. inc_irq_stat(irq_tlb_count); \
  219. local_flush_tlb(); \
  220. }
  221. #ifndef CONFIG_PARAVIRT
  222. #define flush_tlb_others(mask, mm, start, end) \
  223. native_flush_tlb_others(mask, mm, start, end)
  224. #endif
  225. #endif /* _ASM_X86_TLBFLUSH_H */