@@ -161,23 +161,24 @@ void flush_tlb_current_task(void)
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
+	bool need_flush_others_all = true;
 	unsigned long addr;
 	unsigned act_entries, tlb_entries = 0;
 	unsigned long nr_base_pages;
 
 	preempt_disable();
 	if (current->active_mm != mm)
-		goto flush_all;
+		goto out;
 
 	if (!current->mm) {
 		leave_mm(smp_processor_id());
-		goto flush_all;
+		goto out;
 	}
 
 	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
 					|| vmflag & VM_HUGETLB) {
 		local_flush_tlb();
-		goto flush_all;
+		goto out;
 	}
 
 	/* In modern CPU, last level tlb used for both data/ins */
@@ -196,22 +197,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
+		need_flush_others_all = false;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
-
-		if (cpumask_any_but(mm_cpumask(mm),
-				smp_processor_id()) < nr_cpu_ids)
-			flush_tlb_others(mm_cpumask(mm), mm, start, end);
-		preempt_enable();
-		return;
 	}
-
-flush_all:
+out:
+	if (need_flush_others_all) {
+		start = 0UL;
+		end = TLB_FLUSH_ALL;
+	}
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+		flush_tlb_others(mm_cpumask(mm), mm, start, end);
 	preempt_enable();
 }
 
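
For illustration only, a small self-contained sketch of the control-flow shape this patch moves to: instead of two exit paths that each call flush_tlb_others() and preempt_enable(), every path funnels through a single out: label, and a need_flush_others_all flag decides whether the one remaining remote flush covers the whole address space or just the requested range. This is not kernel code; names such as flush_range() and do_remote_flush() are placeholders standing in for the real TLB helpers.

/* Schematic of the "single exit label + widen-the-range flag" pattern. */
#include <stdbool.h>
#include <stdio.h>

#define FLUSH_ALL (~0UL)	/* stand-in for TLB_FLUSH_ALL */

/* Placeholder for flush_tlb_others(): the one remote-flush call site. */
static void do_remote_flush(unsigned long start, unsigned long end)
{
	if (end == FLUSH_ALL)
		printf("remote: flush entire TLB\n");
	else
		printf("remote: flush [%#lx, %#lx)\n", start, end);
}

static void flush_range(unsigned long start, unsigned long end,
			bool can_do_partial)
{
	bool need_flush_others_all = true;

	if (!can_do_partial)
		goto out;	/* early exits keep the default: full remote flush */

	/* per-page path: the remote range stays [start, end) */
	need_flush_others_all = false;
	printf("local: invlpg-style loop over [%#lx, %#lx)\n", start, end);
out:
	if (need_flush_others_all) {
		start = 0UL;
		end = FLUSH_ALL;
	}
	do_remote_flush(start, end);	/* exactly one call site, as in the patch */
}

int main(void)
{
	flush_range(0x1000, 0x5000, true);	/* partial range propagated to remotes */
	flush_range(0x1000, 0x5000, false);	/* falls back to a full remote flush */
	return 0;
}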