@@ -12,6 +12,7 @@
 
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/sched/mm.h>
 
 #include <asm/mmu_context.h>
 
@@ -58,6 +59,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 *
 		 * On the read side the barrier is in pte_xchg(), which orders
 		 * the store to the PTE vs the load of mm_cpumask.
+		 *
+		 * This full barrier is needed by membarrier when switching
+		 * between processes after store to rq->curr, before user-space
+		 * memory accesses.
 		 */
 		smp_mb();
 
@@ -80,6 +85,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
 	if (new_on_cpu)
 		radix_kvm_prefetch_workaround(next);
+	else
+		membarrier_arch_switch_mm(prev, next, tsk);
 
 	/*
 	 * The actual HW switching method differs between the various