@@ -526,12 +526,12 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
/*
* Memory barriers to keep this state in sync are graciously provided by
* the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers are used to ensure that tlb_flush_pending updates, which
+ * happen while the lock is not taken, and the PTE updates, which happen
+ * while the lock is taken, are serialized.
*/
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
return atomic_read(&mm->tlb_flush_pending) > 0;
}

@@ -554,7 +554,13 @@ static inline void inc_tlb_flush_pending(struct mm_struct *mm)
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
- barrier();
+ /*
+ * Guarantee that the tlb_flush_pending does not leak into the
+ * critical section, since we must order the PTE change and changes to
+ * the pending TLB flush indication. We could have relied on TLB flush
+ * as a memory barrier, but this behavior is not clearly documented.
+ */
+ smp_mb__before_atomic();
atomic_dec(&mm->tlb_flush_pending);
}
#else
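
For context, here is a minimal user-space sketch of the ordering protocol the new comments describe. It is not kernel code: C11 atomics and a seq_cst fence stand in for the kernel's atomic_t and smp_mb__before_atomic(), a pthread mutex stands in for the page table lock, and the names change_pte_and_flush() and flush_pending() are made up for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* "page table lock" */
static atomic_int tlb_flush_pending;                    /* pending-flush count */
static int pte;                                         /* stand-in for a PTE */

static void flush_tlb(void) { /* hardware TLB flush elided */ }

/* Writer, e.g. migration: raise the count before taking the lock. */
static void change_pte_and_flush(int newval)
{
	atomic_fetch_add(&tlb_flush_pending, 1);  /* inc_tlb_flush_pending() */

	pthread_mutex_lock(&ptl);
	pte = newval;                             /* PTE update under the lock */
	pthread_mutex_unlock(&ptl);

	flush_tlb();
	/* Order the PTE update and the TLB flush before the decrement,
	 * as smp_mb__before_atomic() + atomic_dec() do in the patch. */
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_sub_explicit(&tlb_flush_pending, 1, memory_order_relaxed);
}

/* Reader, called with the lock held: a non-zero count means some
 * thread may still see a stale TLB entry for a PTE it has changed. */
static bool flush_pending(void)
{
	return atomic_load(&tlb_flush_pending) > 0;  /* mm_tlb_flush_pending() */
}

As the second comment notes, the TLB flush itself might act as a memory barrier on some architectures, but since that behavior is not clearly documented, the patch adds an explicit smp_mb__before_atomic() rather than relying on it.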