@@ -288,12 +288,10 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 {
 	struct pv_node *pn = (struct pv_node *)node;
 	struct pv_node *pp = (struct pv_node *)prev;
-	int waitcnt = 0;
 	int loop;
 	bool wait_early;
 
-	/* waitcnt processing will be compiled out if !QUEUED_LOCK_STAT */
-	for (;; waitcnt++) {
+	for (;;) {
 		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
 			if (READ_ONCE(node->locked))
 				return;
@@ -317,7 +315,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 
 		if (!READ_ONCE(node->locked)) {
 			qstat_inc(qstat_pv_wait_node, true);
-			qstat_inc(qstat_pv_wait_again, waitcnt);
 			qstat_inc(qstat_pv_wait_early, wait_early);
 			pv_wait(&pn->state, vcpu_halted);
 		}
@@ -458,12 +455,9 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		pv_wait(&l->locked, _Q_SLOW_VAL);
 
 		/*
-		 * The unlocker should have freed the lock before kicking the
-		 * CPU. So if the lock is still not free, it is a spurious
-		 * wakeup or another vCPU has stolen the lock. The current
-		 * vCPU should spin again.
+		 * Because of lock stealing, the queue head vCPU may not be
+		 * able to acquire the lock before it has to wait again.
 		 */
-		qstat_inc(qstat_pv_spurious_wakeup, READ_ONCE(l->locked));
 	}
 
 	/*