@@ -164,8 +164,9 @@ unsigned long tlb_single_page_flush_ceiling = 1;
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
-	int need_flush_others_all = 1;
 	unsigned long addr;
+	/* do a global flush by default */
+	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -176,16 +177,14 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
-	if (end == TLB_FLUSH_ALL || vmflag & VM_HUGETLB) {
-		local_flush_tlb();
-		goto out;
-	}
+	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
 
-	if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
+	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+		base_pages_to_flush = TLB_FLUSH_ALL;
 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		need_flush_others_all = 0;
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
 			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
@@ -193,7 +192,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		}
 	}
 out:
-	if (need_flush_others_all) {
+	if (base_pages_to_flush == TLB_FLUSH_ALL) {
 		start = 0UL;
 		end = TLB_FLUSH_ALL;
 	}
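
For reference, the sizing decision the hunk introduces can be read in isolation as the small userspace sketch below (an illustration, not kernel code): the flushed range is converted into a count of base pages, and anything larger than tlb_single_page_flush_ceiling falls back to a full TLB flush. PAGE_SHIFT, the hugetlb flag argument and the main() harness are assumptions added for the example; only the comparisons mirror the diff above.

/*
 * Standalone sketch of the range-sizing logic: how many base pages
 * would be flushed, with TLB_FLUSH_ALL meaning "flush everything".
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4K base pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define TLB_FLUSH_ALL	(~0UL)

static unsigned long tlb_single_page_flush_ceiling = 1;

static unsigned long flush_size(unsigned long start, unsigned long end,
				int hugetlb)
{
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	/* a bounded, non-hugetlb range can be sized in base pages */
	if ((end != TLB_FLUSH_ALL) && !hugetlb)
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/* above the ceiling, one full flush beats many 'invlpg's */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
		base_pages_to_flush = TLB_FLUSH_ALL;

	return base_pages_to_flush;
}

int main(void)
{
	/* one page: at the default ceiling, flushed page by page */
	printf("%lu\n", flush_size(0x1000, 0x2000, 0));
	/* eight pages: exceeds the ceiling, falls back to a full flush */
	printf("%#lx\n", flush_size(0x1000, 0x9000, 0));
	return 0;
}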