@@ -90,7 +90,7 @@ static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
  * We must be able to distinguish between no-tail and the tail at 0:0,
  * therefore increment the cpu number by one.
  */
-static inline u32 encode_tail(int cpu, int idx)
+static inline __pure u32 encode_tail(int cpu, int idx)
 {
 	u32 tail;
 
@@ -103,7 +103,7 @@ static inline u32 encode_tail(int cpu, int idx)
 	return tail;
 }
 
-static inline struct mcs_spinlock *decode_tail(u32 tail)
+static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 {
 	int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
 	int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
@@ -455,6 +455,8 @@ queue:
 	 * pending stuff.
 	 *
 	 * p,*,* -> n,*,*
+	 *
+	 * RELEASE, such that the stores to @node must be complete.
 	 */
 	old = xchg_tail(lock, tail);
 	next = NULL;
@@ -465,6 +467,15 @@ queue:
 	 */
 	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
+		/*
+		 * The above xchg_tail() is also a load of @lock which generates,
+		 * through decode_tail(), a pointer.
+		 *
+		 * The address dependency matches the RELEASE of xchg_tail()
+		 * such that the access to @prev must happen after.
+		 */
+		smp_read_barrier_depends();
+
 		WRITE_ONCE(prev->next, node);
 
 		pv_wait_node(node, prev);
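
For readers who want the ordering argument in isolation: below is a minimal,
standalone sketch (plain C11, not kernel code) of the pattern the new comments
describe. A node is published with a RELEASE exchange on the queue tail, and
the old tail returned by that exchange is what the consumer dereferences. All
names here (struct qnode, tail, enqueue) are illustrative, and
memory_order_acq_rel stands in for the address-dependency ordering that
smp_read_barrier_depends() makes explicit, since portable C11 has no exact
equivalent (memory_order_consume is promoted to acquire by compilers in
practice).

#include <stdatomic.h>
#include <stddef.h>

struct qnode {
	struct qnode *next;
	int cpu;
};

static _Atomic(struct qnode *) tail;

/*
 * Publish @node. The RELEASE half of the exchange ensures the stores
 * initializing @node are complete before the node becomes reachable
 * through @tail -- the role xchg_tail() plays in the patch above.
 * The ACQUIRE half orders the dereference of the returned pointer;
 * the kernel instead relies on the address dependency itself, which
 * smp_read_barrier_depends() documents (a no-op except on Alpha).
 */
static void enqueue(struct qnode *node)
{
	struct qnode *prev;

	node->next = NULL;	/* the stores to @node */
	prev = atomic_exchange_explicit(&tail, node, memory_order_acq_rel);
	if (prev)		/* cf. old & _Q_TAIL_MASK */
		prev->next = node;	/* ordered after prev's own init */
}

int main(void)
{
	struct qnode a = { .cpu = 0 }, b = { .cpu = 1 };

	enqueue(&a);
	enqueue(&b);	/* links a.next = &b */
	return (a.next == &b) ? 0 : 1;
}

The acq_rel choice is deliberately conservative: the point of the patch is
precisely that, on everything except Alpha, the consumer side of this pairing
comes for free from the address dependency, so the kernel pays no acquire
barrier on the fast path.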