@@ -27,40 +27,37 @@
 static inline pte_t ptep_flush_direct(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
 {
-	int active, count;
 	pte_t old;
 
 	old = *ptep;
 	if (unlikely(pte_val(old) & _PAGE_INVALID))
 		return old;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	atomic_inc(&mm->context.flush_count);
+	if (MACHINE_HAS_TLB_LC &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 		__ptep_ipte_local(addr, ptep);
 	else
 		__ptep_ipte(addr, ptep);
-	atomic_sub(0x10000, &mm->context.attach_count);
+	atomic_dec(&mm->context.flush_count);
 	return old;
 }
 
 static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
				     unsigned long addr, pte_t *ptep)
 {
-	int active, count;
 	pte_t old;
 
 	old = *ptep;
 	if (unlikely(pte_val(old) & _PAGE_INVALID))
 		return old;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if ((count & 0xffff) <= active) {
+	atomic_inc(&mm->context.flush_count);
+	if (cpumask_equal(&mm->context.cpu_attach_mask,
+			  cpumask_of(smp_processor_id()))) {
 		pte_val(*ptep) |= _PAGE_INVALID;
 		mm->context.flush_mm = 1;
 	} else
 		__ptep_ipte(addr, ptep);
-	atomic_sub(0x10000, &mm->context.attach_count);
+	atomic_dec(&mm->context.flush_count);
 	return old;
 }
 
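The first hunk replaces the packed attach_count bookkeeping with a plain flush_count plus an explicit cpumask test. As a rough, stand-alone illustration of the two decision rules (a sketch inferred only from the removed and added lines above; the helper names and the plain-bitmask CPU mask are invented for the example, not kernel API):

#include <stdbool.h>

/*
 * Old rule (as read from the removed lines): attach_count packed two
 * values into one atomic int,
 *   bits 16..31  flush operations in flight (added in 0x10000 steps),
 *   bits  0..15  roughly, how many CPUs have the mm attached.
 * A local/lazy flush was allowed when the mm was attached to at most the
 * current CPU (low half 0, or 1 when mm == current->active_mm).
 */
static bool old_rule_local_ok(int attach_count, bool mm_is_active_here)
{
	return (attach_count & 0xffff) <= (mm_is_active_here ? 1 : 0);
}

/*
 * New rule: flush_count only tracks flushes in flight; the attachment
 * question is asked directly against the CPU mask: is this CPU the only
 * one the mm is attached to?
 */
static bool new_rule_local_ok(unsigned long cpu_attach_mask, int this_cpu)
{
	return cpu_attach_mask == (1UL << this_cpu);
}

The new rule no longer needs the mm == current->active_mm special case: attachment is read straight off the mask instead of being reconstructed from a packed counter.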
@@ -289,7 +286,6 @@ EXPORT_SYMBOL(ptep_modify_prot_commit);
 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
				       unsigned long addr, pmd_t *pmdp)
 {
-	int active, count;
 	pmd_t old;
 
 	old = *pmdp;
@@ -299,36 +295,34 @@ static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 		__pmdp_csp(pmdp);
 		return old;
 	}
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+	atomic_inc(&mm->context.flush_count);
+	if (MACHINE_HAS_TLB_LC &&
 	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 		__pmdp_idte_local(addr, pmdp);
 	else
 		__pmdp_idte(addr, pmdp);
-	atomic_sub(0x10000, &mm->context.attach_count);
+	atomic_dec(&mm->context.flush_count);
 	return old;
 }
 
 static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
 {
-	int active, count;
 	pmd_t old;
 
 	old = *pmdp;
 	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 		return old;
-	active = (mm == current->active_mm) ? 1 : 0;
-	count = atomic_add_return(0x10000, &mm->context.attach_count);
-	if ((count & 0xffff) <= active) {
+	atomic_inc(&mm->context.flush_count);
+	if (cpumask_equal(&mm->context.cpu_attach_mask,
+			  cpumask_of(smp_processor_id()))) {
 		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
 	} else if (MACHINE_HAS_IDTE)
 		__pmdp_idte(addr, pmdp);
 	else
 		__pmdp_csp(pmdp);
-	atomic_sub(0x10000, &mm->context.attach_count);
+	atomic_dec(&mm->context.flush_count);
 	return old;
 }
 
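Both the pte and pmd lazy helpers above follow the same bracket: raise flush_count for the duration of the operation, then either defer the hardware flush (mark the entry invalid in the table and set flush_mm) when only the local CPU has the mm attached, or flush immediately, and finally drop the counter. A minimal user-space analogue of that bracket, purely illustrative (the struct, names and callback style are invented, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
	atomic_int flush_count;        /* invalidations currently in flight */
	unsigned long cpu_attach_mask; /* CPUs the context is attached to   */
	bool flush_mm;                 /* a deferred full flush is pending  */
};

/* Lazy invalidation bracket: defer when only this CPU is attached,
 * otherwise flush right away. */
static void lazy_invalidate(struct ctx *c, int this_cpu,
			    void (*mark_invalid)(void), void (*flush_now)(void))
{
	atomic_fetch_add(&c->flush_count, 1);
	if (c->cpu_attach_mask == (1UL << this_cpu)) {
		mark_invalid();      /* software-only: entry marked invalid */
		c->flush_mm = true;  /* remember to flush the whole mm later */
	} else {
		flush_now();         /* another CPU may hold a stale entry */
	}
	atomic_fetch_sub(&c->flush_count, 1);
}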