@@ -25,7 +25,6 @@ do { \
 	if (cpu_has_htw) { \
 		write_c0_pwbase(pgd); \
 		back_to_back_c0_hazard(); \
-		htw_reset(); \
 	} \
 } while (0)
 
@@ -144,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned long flags;
 	local_irq_save(flags);
 
+	htw_stop();
 	/* Check if our ASID is of an older version and thus invalid */
 	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
 		get_new_mmu_context(next, cpu);
@@ -156,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -182,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 
 	local_irq_save(flags);
 
+	htw_stop();
 	/* Unconditionally get a new ASID. */
 	get_new_mmu_context(next, cpu);
 
@@ -191,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	/* mark mmu ownership change */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	cpumask_set_cpu(cpu, mm_cpumask(next));
+	htw_start();
 
 	local_irq_restore(flags);
 }
@@ -205,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 	unsigned long flags;
 
 	local_irq_save(flags);
+	htw_stop();
 
 	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		get_new_mmu_context(mm, cpu);
@@ -213,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 		/* will get a new context next time */
 		cpu_context(cpu, mm) = 0;
 	}
+	htw_start();
 	local_irq_restore(flags);
 }
 