tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};
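
/*
 * flush_mm identifies the address space being flushed; flush_start and
 * flush_end bound the flushed range, with flush_end == TLB_FLUSH_ALL
 * requesting a full flush.  do_kernel_range_flush() below reuses the
 * struct but only reads flush_start and flush_end.
 */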

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

#endif /* CONFIG_SMP */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
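
/*
 * Like switch_mm() above, but for callers that have already disabled
 * interrupts.
 */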
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
			 * If our current stack is in vmalloc space and isn't
			 * mapped in the new pgd, we'll double-fault.  Forcibly
			 * map it.
			 */
			unsigned int stack_pgd_index = pgd_index(current_stack_pointer());

			pgd_t *pgd = next->pgd + stack_pgd_index;

			if (unlikely(pgd_none(*pgd)))
				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
		}

#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif

		cpumask_set_cpu(cpu, mm_cpumask(next));

		/*
		 * Re-load page tables.
		 *
		 * This logic has an ordering constraint:
		 *
		 *  CPU 0: Write to a PTE for 'next'
		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
		 *  CPU 1: set bit 1 in next's mm_cpumask
		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
		 *
		 * We need to prevent an outcome in which CPU 1 observes
		 * the new PTE value and CPU 0 observes bit 1 clear in
		 * mm_cpumask.  (If that occurs, then the IPI will never
		 * be sent, and CPU 0's TLB will contain a stale entry.)
		 *
		 * The bad outcome can occur if either CPU's load is
		 * reordered before that CPU's store, so both CPUs must
		 * execute full barriers to prevent this from happening.
		 *
		 * Thus, switch_mm needs a full barrier between the
		 * store to mm_cpumask and any operation that could load
		 * from next->pgd.  TLB fills are special and can happen
		 * due to instruction fetches or for no reason at all,
		 * and neither LOCK nor MFENCE orders them.
		 * Fortunately, load_cr3() is serializing and gives the
		 * ordering guarantee we need.
		 */
		load_cr3(next->pgd);

		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));

			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure we don't use freed page tables.
			 *
			 * As above, load_cr3() is serializing and orders TLB
			 * fills with respect to the mm_cpumask write.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}

#ifdef CONFIG_SMP

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush NMI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm.  This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, so in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;

			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}
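
/*
 * Ask the CPUs in @cpumask to run flush_tlb_func().  An @end of 0 is
 * normalized to a single page at @start, and @end == TLB_FLUSH_ALL
 * requests a full flush on the targets.  On UV systems the broadcast
 * assist hardware (uv_flush_tlb_others) may shrink the set of CPUs
 * that still need the IPI.
 */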
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
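
/*
 * Note: the callers below use flush_tlb_others(), which resolves to
 * native_flush_tlb_others() on bare metal and may be redirected by
 * paravirt to a hypervisor-specific implementation.
 */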

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
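
/*
 * The ceiling is runtime-tunable through the debugfs file created by
 * create_tlb_single_page_flush_ceiling() below, e.g. (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *
 *	echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */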

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
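
/*
 * Flush one user page: with INVLPG locally when this CPU is using
 * @vma's mm, and via IPI on any other CPUs in the mm's cpumask.  The
 * 0UL end argument makes flush_tlb_others() flush exactly one page
 * at @start.
 */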
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
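
/*
 * IPI handler for flush_tlb_all(): flush everything on this CPU and,
 * if we were in lazy TLB mode, leave the mm so we stop receiving
 * further flush IPIs.
 */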
static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance as in the user-space flush path above; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
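
/*
 * debugfs interface for tlb_single_page_flush_ceiling: reads return
 * the current ceiling, writes of a non-negative integer replace it.
 */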
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

#endif /* CONFIG_SMP */