@@ -712,15 +712,53 @@ void tlb_flush_remove_tables_local(void *arg)
 	}
 }
 
+static void mm_fill_lazy_tlb_cpu_mask(struct mm_struct *mm,
+				      struct cpumask *lazy_cpus)
+{
+	int cpu;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
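+		/* CPUs in lazy TLB mode were skipped by earlier flush IPIs. */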
+		if (per_cpu(cpu_tlbstate.is_lazy, cpu))
+			cpumask_set_cpu(cpu, lazy_cpus);
+	}
+}
+
 void tlb_flush_remove_tables(struct mm_struct *mm)
 {
 	int cpu = get_cpu();
+	cpumask_var_t lazy_cpus;
+
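+	/* No other CPU has this mm loaded; nothing to flush remotely. */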
+	if (cpumask_any_but(mm_cpumask(mm), cpu) >= nr_cpu_ids) {
+		put_cpu();
+		return;
+	}
+
+	if (!zalloc_cpumask_var(&lazy_cpus, GFP_ATOMIC)) {
+		/*
+		 * If the cpumask allocation fails, do a brute force flush
+		 * on all the CPUs that have this mm loaded.
+		 */
+		smp_call_function_many(mm_cpumask(mm),
+				       tlb_flush_remove_tables_local, (void *)mm, 1);
+		put_cpu();
+		return;
+	}
+
 	/*
-	 * XXX: this really only needs to be called for CPUs in lazy TLB mode.
+	 * CPUs with !is_lazy either received a TLB flush IPI while the user
+	 * pages in this address range were unmapped, or have context switched
+	 * and reloaded %CR3 since then.
+	 *
+	 * Shootdown IPIs at page table freeing time only need to be sent to
+	 * CPUs that may have out of date TLB contents.
 	 */
-	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
-		smp_call_function_many(mm_cpumask(mm), tlb_flush_remove_tables_local, (void *)mm, 1);
-
+	mm_fill_lazy_tlb_cpu_mask(mm, lazy_cpus);
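+	/* IPI only the lazy TLB CPUs and wait for them to finish. */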
+	smp_call_function_many(lazy_cpus,
+			       tlb_flush_remove_tables_local, (void *)mm, 1);
+	free_cpumask_var(lazy_cpus);
 	put_cpu();
 }
 