@@ -368,20 +368,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
 		return;
 
-	if (tlb_defer_switch_to_init_mm()) {
-		/*
-		 * There's a significant optimization that may be possible
-		 * here. We have accurate enough TLB flush tracking that we
-		 * don't need to maintain coherence of TLB per se when we're
-		 * lazy. We do, however, need to maintain coherence of
-		 * paging-structure caches. We could, in principle, leave our
-		 * old mm loaded and only switch to init_mm when
-		 * tlb_remove_page() happens.
-		 */
-		this_cpu_write(cpu_tlbstate.is_lazy, true);
-	} else {
-		switch_mm(NULL, &init_mm, NULL);
-	}
+	this_cpu_write(cpu_tlbstate.is_lazy, true);
 }
 
 /*
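For reference, a sketch of what enter_lazy_tlb() reduces to once this hunk is
applied, reconstructed from the context and added lines above. The comments
are mine and paraphrase the rationale in the block comment being removed;
they are not part of the patched kernel source.

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Already running on init_mm: nothing to mark lazy. */
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
		return;

	/*
	 * Keep the old mm loaded and only flag this CPU as lazy.  Per the
	 * removed comment, TLB flush tracking is accurate enough that an
	 * immediate switch_mm(NULL, &init_mm, NULL) is not required here.
	 */
	this_cpu_write(cpu_tlbstate.is_lazy, true);
}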