@@ -76,6 +76,18 @@
 #define MAX_NODES	4
 #endif
 
+/*
+ * The pending bit spinning loop count.
+ * This heuristic is used to limit the number of lockword accesses
+ * made by atomic_cond_read_relaxed when waiting for the lock to
+ * transition out of the "== _Q_PENDING_VAL" state. We don't spin
+ * indefinitely because there's no guarantee that we'll make forward
+ * progress.
+ */
+#ifndef _Q_PENDING_LOOPS
+#define _Q_PENDING_LOOPS	1
+#endif
+
 /*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
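A note on the #ifndef guard above: because the default is only provided when the architecture has not already defined _Q_PENDING_LOOPS, an arch header included before this point can raise the spin budget where bounded polling of the lock word is cheap. A minimal sketch of such an override follows; the header name and the value are illustrative assumptions, not part of this patch.

/*
 * Hypothetical arch override (e.g. in asm/qspinlock.h, included before
 * the #ifndef guard above): raises the pending-wait spin budget.  The
 * value is an illustrative assumption, not taken from this patch.
 */
#define _Q_PENDING_LOOPS	(1 << 9)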
@@ -266,13 +278,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		return;
 
 	/*
-	 * wait for in-progress pending->locked hand-overs
+	 * Wait for in-progress pending->locked hand-overs with a bounded
+	 * number of spins so that we guarantee forward progress.
 	 *
 	 * 0,1,0 -> 0,0,1
 	 */
 	if (val == _Q_PENDING_VAL) {
-		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
-			cpu_relax();
+		int cnt = _Q_PENDING_LOOPS;
+		val = atomic_cond_read_relaxed(&lock->val,
+					       (VAL != _Q_PENDING_VAL) || !cnt--);
 	}
 
 	/*
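For context on the replacement loop above: atomic_cond_read_relaxed() re-reads lock->val and re-evaluates the condition expression with VAL bound to the freshly loaded value; on architectures without a dedicated wait instruction this amounts to a cpu_relax() polling loop. The sketch below is a simplified user-space model of that behaviour under those assumptions; the function name, the C11 atomics and the cpu_relax() stub are illustrative, not kernel code. It shows how the "!cnt--" term guarantees the wait ends after at most _Q_PENDING_LOOPS failed checks of the lock word, even if the pending->locked hand-over never completes.

#include <stdatomic.h>

#define _Q_PENDING_VAL		(1U << 8)	/* pending bit (bit 8) set, locked byte and tail clear */
#define _Q_PENDING_LOOPS	1

static inline void cpu_relax(void) { }		/* stub; the kernel emits a pause/yield hint here */

/*
 * Simplified model of:
 *	val = atomic_cond_read_relaxed(&lock->val,
 *				       (VAL != _Q_PENDING_VAL) || !cnt--);
 *
 * The condition is re-evaluated after every read of the lock word.  The
 * "!cnt--" term becomes true once _Q_PENDING_LOOPS checks have failed,
 * so the loop terminates even if the lock never leaves the pending state;
 * the caller then falls through and queues as normal.
 */
static unsigned int wait_for_pending_handover(_Atomic unsigned int *lockword)
{
	int cnt = _Q_PENDING_LOOPS;
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(lockword, memory_order_relaxed);
		if ((val != _Q_PENDING_VAL) || !cnt--)
			break;		/* hand-over observed, or spin budget exhausted */
		cpu_relax();
	}

	return val;	/* may still be _Q_PENDING_VAL; the slowpath then queues */
}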