@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;
 
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
 
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
 
 	preempt_disable();
 
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
 
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
-					   unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	unsigned long addr = ALIGN(start, HPAGE_SIZE);
-	for (; addr < end; addr += HPAGE_SIZE) {
-		pgd = pgd_offset(mm, addr);
-		if (likely(!pgd_none(*pgd))) {
-			pud = pud_offset(pgd, addr);
-			if (likely(!pud_none(*pud))) {
-				pmd = pmd_offset(pud, addr);
-				if (likely(!pmd_none(*pmd)))
-					if (pmd_large(*pmd))
-						return addr;
-			}
-		}
-	}
-	return 0;
-}
-
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
 	unsigned act_entries, tlb_entries = 0;
+	unsigned long nr_base_pages;
 
 	preempt_disable();
 	if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		tlb_entries = tlb_lli_4k[ENTRIES];
 	else
 		tlb_entries = tlb_lld_4k[ENTRIES];
+
 	/* Assume all of TLB entries was occupied by this task */
-	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+	act_entries = tlb_entries >> tlb_flushall_shift;
+	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+	nr_base_pages = (end - start) >> PAGE_SHIFT;
 
 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	if (nr_base_pages > act_entries) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		if (has_large_page(mm, start, end)) {
-			local_flush_tlb();
-			goto flush_all;
-		}
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
 
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 
 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
 
 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
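
The count_vm_event() -> count_vm_tlb_event() conversions above assume a counting wrapper that compiles away unless TLB-flush debugging is enabled; its definition is not part of this diff. A sketch of such a wrapper, under that assumption:

/*
 * Hypothetical guard (not shown in this diff): with debugging off, the
 * TLB counters become no-ops, so the flush fast paths above pay nothing
 * for the accounting in production builds.
 */
#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)	count_vm_event(x)
#else
#define count_vm_tlb_event(x)	do {} while (0)
#endif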
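
On the flush_tlb_mm_range() hunk: the patch precomputes a flush budget, act_entries = min(mm->total_vm, tlb_entries >> tlb_flushall_shift), and falls back to a full TLB flush once the range spans more base pages than that budget; smaller ranges are still flushed page by page with invlpg. It also drops the has_large_page() pre-scan, so the range path no longer walks page tables looking for huge mappings first. A minimal user-space sketch of the arithmetic follows; should_flush_all() and the EXAMPLE_* constants are illustrative stand-ins (the kernel takes the real entry counts from tlb_lli_4k/tlb_lld_4k and tunes tlb_flushall_shift per CPU):

#include <stdio.h>

/* Illustrative stand-ins; the kernel derives these from CPUID probing. */
#define PAGE_SHIFT		12
#define EXAMPLE_TLB_ENTRIES	512	/* stands in for tlb_lld_4k[ENTRIES] */
#define EXAMPLE_FLUSHALL_SHIFT	6	/* stands in for tlb_flushall_shift */

/*
 * Mirrors the patched decision: full flush once the range covers more
 * base pages than the scaled, total_vm-clamped TLB budget.
 */
static int should_flush_all(unsigned long start, unsigned long end,
			    unsigned long total_vm)
{
	unsigned long act_entries = EXAMPLE_TLB_ENTRIES >> EXAMPLE_FLUSHALL_SHIFT;
	unsigned long nr_base_pages = (end - start) >> PAGE_SHIFT;

	if (total_vm < act_entries)
		act_entries = total_vm;
	return nr_base_pages > act_entries;
}

int main(void)
{
	/*
	 * The budget here is 512 >> 6 = 8 pages: a 16-page range tips
	 * over to a full flush, a 4-page range still uses invlpg.
	 */
	printf("16 pages: %s\n", should_flush_all(0, 16UL << PAGE_SHIFT, 10000) ?
	       "full flush" : "per-page invlpg");
	printf(" 4 pages: %s\n", should_flush_all(0, 4UL << PAGE_SHIFT, 10000) ?
	       "full flush" : "per-page invlpg");
	return 0;
}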