@@ -528,17 +528,16 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	    f->new_tlb_gen == local_tlb_gen + 1 &&
 	    f->new_tlb_gen == mm_tlb_gen) {
 		/* Partial flush */
-		unsigned long addr;
-		unsigned long nr_pages = (f->end - f->start) >> PAGE_SHIFT;
+		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
+		unsigned long addr = f->start;
 
-		addr = f->start;
 		while (addr < f->end) {
 			__flush_tlb_one_user(addr);
-			addr += PAGE_SIZE;
+			addr += 1UL << f->stride_shift;
 		}
 		if (local)
-			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_pages);
-		trace_tlb_flush(reason, nr_pages);
+			count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
+		trace_tlb_flush(reason, nr_invalidate);
 	} else {
 		/* Full flush. */
 		local_flush_tlb();
@@ -623,12 +622,13 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned long vmflag)
+				unsigned long end, unsigned int stride_shift)
 {
 	int cpu;
 
 	struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
 		.mm = mm,
+		.stride_shift = stride_shift,
 	};
 
 	cpu = get_cpu();
@@ -638,8 +638,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
-	    !(vmflag & VM_HUGETLB) &&
-	    ((end - start) >> PAGE_SHIFT) <= tlb_single_page_flush_ceiling) {
+	    ((end - start) >> stride_shift) <= tlb_single_page_flush_ceiling) {
 		info.start = start;
 		info.end = end;
 	} else {