@@ -48,23 +48,6 @@ static inline void __tlb_flush_global(void)
  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
  * this implicates multiple ASCEs!).
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-	preempt_disable();
-	atomic_inc(&mm->context.flush_count);
-	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		/* Local TLB flush */
-		__tlb_flush_local();
-	} else {
-		/* Global TLB flush */
-		__tlb_flush_global();
-		/* Reset TLB flush mask */
-		cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-	}
-	atomic_dec(&mm->context.flush_count);
-	preempt_enable();
-}
-
 static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
 	unsigned long gmap_asce;
@@ -76,16 +59,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	atomic_inc(&mm->context.flush_count);
+	/* Reset TLB flush mask */
+	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+	barrier();
 	gmap_asce = READ_ONCE(mm->context.gmap_asce);
 	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
 		if (gmap_asce)
 			__tlb_flush_idte(gmap_asce);
 		__tlb_flush_idte(mm->context.asce);
 	} else {
-		__tlb_flush_full(mm);
+		/* Global TLB flush */
+		__tlb_flush_global();
 	}
-	/* Reset TLB flush mask */
-	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
 	atomic_dec(&mm->context.flush_count);
 	preempt_enable();
 }
@@ -99,7 +84,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global()		__tlb_flush_local()
-#define __tlb_flush_full(mm)		__tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.