@@ -187,6 +187,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
 	unsigned cpu = smp_processor_id();
 	u64 next_tlb_gen;
+	bool need_flush;
+	u16 new_asid;
 
 	/*
 	 * NB: The scheduler will call us with prev == next when switching
@@ -252,8 +254,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
 		return;
 	} else {
-		u16 new_asid;
-		bool need_flush;
 		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
 		/*
@@ -308,44 +308,44 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		/* Let nmi_uaccess_okay() know that we're changing CR3. */
 		this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
 		barrier();
+	}
 
-		if (need_flush) {
-			this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
-			this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
-			load_new_mm_cr3(next->pgd, new_asid, true);
-
-			/*
-			 * NB: This gets called via leave_mm() in the idle path
-			 * where RCU functions differently. Tracing normally
-			 * uses RCU, so we need to use the _rcuidle variant.
-			 *
-			 * (There is no good reason for this. The idle code should
-			 * be rearranged to call this before rcu_idle_enter().)
-			 */
-			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-		} else {
-			/* The new ASID is already up to date. */
-			load_new_mm_cr3(next->pgd, new_asid, false);
-
-			/* See above wrt _rcuidle. */
-			trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-		}
+	if (need_flush) {
+		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
+		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
+		load_new_mm_cr3(next->pgd, new_asid, true);
 
 		/*
-		 * Record last user mm's context id, so we can avoid
-		 * flushing branch buffer with IBPB if we switch back
-		 * to the same user.
+		 * NB: This gets called via leave_mm() in the idle path
+		 * where RCU functions differently. Tracing normally
+		 * uses RCU, so we need to use the _rcuidle variant.
+		 *
+		 * (There is no good reason for this. The idle code should
+		 * be rearranged to call this before rcu_idle_enter().)
 		 */
-		if (next != &init_mm)
-			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
-		/* Make sure we write CR3 before loaded_mm. */
-		barrier();
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+	} else {
+		/* The new ASID is already up to date. */
+		load_new_mm_cr3(next->pgd, new_asid, false);
 
-		this_cpu_write(cpu_tlbstate.loaded_mm, next);
-		this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+		/* See above wrt _rcuidle. */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
 	}
 
+	/*
+	 * Record last user mm's context id, so we can avoid
+	 * flushing branch buffer with IBPB if we switch back
+	 * to the same user.
+	 */
+	if (next != &init_mm)
+		this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+	/* Make sure we write CR3 before loaded_mm. */
+	barrier();
+
+	this_cpu_write(cpu_tlbstate.loaded_mm, next);
+	this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+
 	load_mm_cr4(next);
 	switch_ldt(real_prev, next);
 }