@@ -287,20 +287,21 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
-	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	u8 locked;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(lockval == _Q_LOCKED_VAL))
+	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
-	if (unlikely(lockval != _Q_SLOW_VAL)) {
-		if (debug_locks_silent)
-			return;
-		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+	if (unlikely(locked != _Q_SLOW_VAL)) {
+		WARN(!debug_locks_silent,
+		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
+		     (unsigned long)lock, atomic_read(&lock->val));
 		return;
 	}
 
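For illustration only, below is a minimal userspace sketch of the unlock logic this hunk touches, using C11 atomics in place of the kernel's cmpxchg(). It is not the kernel implementation: sim_lock, sim_unlock, SIM_LOCKED and SIM_SLOW are hypothetical stand-ins for qspinlock, __pv_queued_spin_unlock, _Q_LOCKED_VAL and _Q_SLOW_VAL, and the hashing/kick of the sleeping vCPU is reduced to a comment. It only shows the shape of the fast path: one compare-and-swap releases an uncontended lock, a leftover SLOW value routes to the slow path, and any other value is treated as corruption.

/* Hypothetical userspace sketch of the unlock fast/slow path split. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SIM_LOCKED 1u	/* stand-in for _Q_LOCKED_VAL */
#define SIM_SLOW   3u	/* stand-in for _Q_SLOW_VAL */

struct sim_lock {
	_Atomic uint8_t locked;
};

static void sim_unlock(struct sim_lock *lock)
{
	uint8_t locked = SIM_LOCKED;

	/*
	 * Fast path: mirrors cmpxchg(&l->locked, _Q_LOCKED_VAL, 0).
	 * On failure, atomic_compare_exchange_strong() writes the
	 * observed value back into 'locked', just as cmpxchg()
	 * returns the old value.
	 */
	if (atomic_compare_exchange_strong(&lock->locked, &locked, 0))
		return;

	/*
	 * Anything other than SLOW means the lock word was corrupted;
	 * warn and bail out, as the WARN() in the patch does.
	 */
	if (locked != SIM_SLOW) {
		fprintf(stderr,
			"sim_unlock: lock %p has corrupted value 0x%x!\n",
			(void *)lock, (unsigned)locked);
		return;
	}

	/* The kernel would unhash the pv_node and kick the waiter here. */
	atomic_store(&lock->locked, 0);
}

int main(void)
{
	struct sim_lock lock = { SIM_LOCKED };

	sim_unlock(&lock);
	printf("locked = %u\n", (unsigned)atomic_load(&lock.locked));
	return 0;
}

Note how the sketch reflects the patch's structure: the result of the compare-and-swap is examined after the fast-path check rather than being latched at declaration time, and the silent-debug case is folded into the warning condition (WARN(!debug_locks_silent, ...)) instead of an early return before WARN(1, ...).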