@@ -337,8 +337,8 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * barriers.
 	 */
 	if (val & _Q_LOCKED_MASK) {
-		smp_cond_load_acquire(&lock->val.counter,
-				      !(VAL & _Q_LOCKED_MASK));
+		atomic_cond_read_acquire(&lock->val,
+					 !(VAL & _Q_LOCKED_MASK));
 	}
 
 	/*
@@ -441,8 +441,8 @@ queue:
 	 *
 	 * The PV pv_wait_head_or_lock function, if active, will acquire
	 * the lock and return a non-zero value. So we have to skip the
-	 * smp_cond_load_acquire() call. As the next PV queue head hasn't been
-	 * designated yet, there is no way for the locked value to become
+	 * atomic_cond_read_acquire() call. As the next PV queue head hasn't
+	 * been designated yet, there is no way for the locked value to become
 	 * _Q_SLOW_VAL. So both the set_locked() and the
 	 * atomic_cmpxchg_relaxed() calls will be safe.
 	 *
@@ -452,7 +452,7 @@ queue:
 	if ((val = pv_wait_head_or_lock(lock, node)))
 		goto locked;
 
-	val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));
+	val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
 
 locked:
 	/*
@@ -469,7 +469,7 @@ locked:
 	/* In the PV case we might already have _Q_LOCKED_VAL set */
 	if ((val & _Q_TAIL_MASK) == tail) {
 		/*
-		 * The smp_cond_load_acquire() call above has provided the
+		 * The atomic_cond_read_acquire() call above has provided the
 		 * necessary acquire semantics required for locking.
 		 */
 		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
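
For readers unfamiliar with the helper being swapped in: atomic_cond_read_acquire(v, c) busy-waits until the condition c holds on the value read from the atomic_t v, then returns that value with acquire ordering, which is why the callers above can pass &lock->val directly instead of reaching into ->val.counter as the old smp_cond_load_acquire() calls did. Below is a minimal userspace C11 sketch of those semantics only, not the kernel implementation; LOCKED_MASK and cond_read_acquire_unlocked() are illustrative stand-ins, not kernel symbols.

/*
 * Userspace C11 sketch: spin with relaxed loads until the observed value
 * says the lock byte is clear, then give the final read acquire ordering
 * so later accesses cannot be reordered before it.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOCKED_MASK	0xffU		/* stand-in for _Q_LOCKED_MASK */

static uint32_t cond_read_acquire_unlocked(_Atomic uint32_t *v)
{
	uint32_t val;

	/* Cheap relaxed reads while we wait for the condition... */
	do {
		val = atomic_load_explicit(v, memory_order_relaxed);
	} while (val & LOCKED_MASK);

	/* ...then upgrade to acquire ordering once it holds. */
	atomic_thread_fence(memory_order_acquire);
	return val;
}

int main(void)
{
	_Atomic uint32_t lockval = 0;	/* pretend the previous owner released */

	printf("observed lock word: %" PRIu32 "\n",
	       cond_read_acquire_unlocked(&lockval));
	return 0;
}

The relaxed spin followed by an acquire barrier roughly mirrors how the generic kernel helper is built (a relaxed conditional load completed by an acquire barrier), which is what lets the slowpath rely on the returned value without extra explicit barriers.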