@@ -136,9 +136,12 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
 		return;
 	}
 	ACCESS_ONCE(prev->next) = node;
-	smp_wmb();
-	/* Wait until the lock holder passes the lock down */
-	while (!ACCESS_ONCE(node->locked))
+	/*
+	 * Wait until the lock holder passes the lock down.
+	 * Using smp_load_acquire() provides a memory barrier that
+	 * ensures subsequent operations happen after the lock is acquired.
+	 */
+	while (!(smp_load_acquire(&node->locked)))
 		arch_mutex_cpu_relax();
 }
 
@@ -156,8 +159,13 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
 		while (!(next = ACCESS_ONCE(node->next)))
 			arch_mutex_cpu_relax();
 	}
-	ACCESS_ONCE(next->locked) = 1;
-	smp_wmb();
+	/*
+	 * Pass lock to next waiter.
+	 * smp_store_release() provides a memory barrier to ensure
+	 * all operations in the critical section has been completed
+	 * before unlocking.
+	 */
+	smp_store_release(&next->locked, 1);
 }
 
 /*