@@ -19,6 +19,7 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
 void smp_new_mmu_context_version(void);
@@ -76,8 +77,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +125,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
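
For context, a minimal sketch of how the new per-CPU array might be consulted (illustrative only; the helper name scan_secondary_mm() and the action taken per CPU are assumptions, not part of this patch):

/*
 * Sketch, not from the patch above: switch_mm() now records in
 * per_cpu_secondary_mm the mm most recently loaded into each CPU's
 * secondary context, so a consumer can walk the online CPUs and act
 * only on those whose secondary context still refers to a given mm.
 */
static void scan_secondary_mm(struct mm_struct *mm)
{
	int cpu;

	for_each_online_cpu(cpu) {
		/* Skip CPUs that never loaded this mm as secondary. */
		if (per_cpu(per_cpu_secondary_mm, cpu) != mm)
			continue;
		/* e.g. target a TLB flush or context rebuild at 'cpu' */
	}
}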