@@ -358,8 +358,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * sequentiality; this is because not all clear_pending_set_locked()
 	 * implementations imply full barriers.
 	 */
-	while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
-		cpu_relax();
+	smp_cond_acquire(!(atomic_read(&lock->val) & _Q_LOCKED_MASK));
 
 	/*
 	 * take ownership and clear the pending bit.
@@ -435,7 +434,7 @@ queue:
 	 *
 	 * The PV pv_wait_head_or_lock function, if active, will acquire
 	 * the lock and return a non-zero value. So we have to skip the
-	 * smp_load_acquire() call. As the next PV queue head hasn't been
+	 * smp_cond_acquire() call. As the next PV queue head hasn't been
 	 * designated yet, there is no way for the locked value to become
 	 * _Q_SLOW_VAL. So both the set_locked() and the
 	 * atomic_cmpxchg_relaxed() calls will be safe.
@@ -466,7 +465,7 @@ locked:
 			break;
 		}
 		/*
-		 * The smp_load_acquire() call above has provided the necessary
+		 * The smp_cond_acquire() call above has provided the necessary
 		 * acquire semantics required for locking. At most two
 		 * iterations of this loop may be ran.
 		 */
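For context, smp_cond_acquire() is the generic spin-until-condition helper from include/linux/compiler.h. A minimal sketch of its generic definition from around the time of this change (the in-tree version may differ in exact wording) looks like:

/*
 * Spin until @cond becomes true, then provide ACQUIRE ordering.
 * The control dependency of the wait gives LOAD->STORE ordering;
 * the trailing smp_rmb() adds LOAD->LOAD, so together they form
 * ACQUIRE without issuing smp_load_acquire() on every iteration.
 */
#define smp_cond_acquire(cond)	do {				\
	while (!(cond))						\
		cpu_relax();					\
	smp_rmb(); /* ctrl + rmb := acquire */			\
} while (0)

With that helper, the pending-bit wait loop above spins on a relaxed atomic_read() of lock->val and pays for the acquire ordering only once, after the locked byte has been observed clear, rather than on every iteration of the loop.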