@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
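
A condensed sketch of what try_to_unmap_flush() looks like once the hunks above are applied may help when reading the change. The lines that fall outside the hunk context (the tlb_ubc lookup, the early return, the final put_cpu()) and the struct tlbflush_unmap_batch layout are reconstructed from the members referenced in the diff, not quoted from the tree, so treat them as assumptions:

/*
 * Sketch of the post-patch flush path. Everything outside the hunks above
 * (struct layout, tlb_ubc lookup, early return, put_cpu()) is assumed from
 * context rather than copied from the source tree.
 */
struct tlbflush_unmap_batch {
	struct cpumask cpumask;		/* CPUs that may still cache stale entries */
	bool flush_required;		/* a batched unmap deferred its flush */
	bool writable;			/* one of the deferred PTEs was writable */
};

void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;	/* assumed field */
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

	/* Flush the local TLB directly rather than sending ourselves an IPI. */
	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	/*
	 * Hand any remaining CPUs to the architecture's flush_tlb_others();
	 * mm == NULL and TLB_FLUSH_ALL ask for a full flush rather than
	 * per-PFN invalidations, matching the rationale in the comment the
	 * first hunk removes.
	 */
	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);

	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();
}

The net effect is that the generic code no longer rolls its own IPI handler via smp_call_function_many(): the local CPU is flushed inline, and the remaining CPUs in the batch go through the architecture's existing flush_tlb_others() path instead.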