@@ -57,6 +57,23 @@ static inline void invpcid_flush_all_nonglobals(void)
 	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
 }
 
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+	u64 new_tlb_gen;
+
+	/*
+	 * Bump the generation count. This also serves as a full barrier
+	 * that synchronizes with switch_mm(): callers are required to order
+	 * their read of mm_cpumask after their writes to the paging
+	 * structures.
+	 */
+	smp_mb__before_atomic();
+	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
+	smp_mb__after_atomic();
+
+	return new_tlb_gen;
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -262,6 +279,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 					struct mm_struct *mm)
 {
+	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
 
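A rough caller-side sketch, appended here only to illustrate the ordering contract documented above inc_mm_tlb_gen(): the caller's page-table writes must happen before the generation bump, and mm_cpumask(mm) may only be read after it. The helpers example_flush_after_pte_update() and example_send_flush_ipis() are invented names for illustration; they are not part of this patch or of the kernel's TLB-flush API.

/* Stand-in for the real IPI-based remote flush; illustrative only. */
static void example_send_flush_ipis(const struct cpumask *cpus,
				    struct mm_struct *mm, u64 tlb_gen)
{
	/* ... ask each CPU in @cpus to flush translations up to @tlb_gen ... */
}

static void example_flush_after_pte_update(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/* The caller has already written the paging structures at this point. */

	/*
	 * inc_mm_tlb_gen() acts as a full barrier (smp_mb__before_atomic() /
	 * smp_mb__after_atomic() around the atomic64_inc_return()), so the
	 * page-table writes above are ordered before the mm_cpumask() read
	 * below, as the comment in inc_mm_tlb_gen() requires.
	 */
	new_tlb_gen = inc_mm_tlb_gen(mm);

	/* Only now is it safe to read mm_cpumask(mm) and notify other CPUs. */
	example_send_flush_ipis(mm_cpumask(mm), mm, new_tlb_gen);
}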