@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Put the reader into the wait queue
 	 */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/*
 	 * The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 	/*
 	 * Signal the next one in queue to become queue head
 	 */
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_read_lock_slowpath);
 
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 	u32 cnts;
 
 	/* Put the writer into the wait queue */
-	arch_spin_lock(&lock->lock);
+	arch_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
 		cpu_relax_lowlatency();
 	}
 unlock:
-	arch_spin_unlock(&lock->lock);
+	arch_spin_unlock(&lock->wait_lock);
 }
 EXPORT_SYMBOL(queued_write_lock_slowpath);
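
For context, the wait_lock taken in both slowpaths above is the internal spinlock that serializes queued readers and writers; it is distinct from the atomic cnts word that holds the actual reader/writer state. A minimal sketch of the structure after the rename, assuming it mirrors include/asm-generic/qrwlock_types.h of this era (the exact layout in any given tree may differ, and this hunk is not part of the patch shown above, which only touches the qrwlock.c call sites):

/*
 * Sketch of the queued rwlock layout after the rename.
 * Assumption: mirrors include/asm-generic/qrwlock_types.h;
 * not taken from the patch above.
 */
typedef struct qrwlock {
	atomic_t	cnts;		/* reader count and writer state bits */
	arch_spinlock_t	wait_lock;	/* serializes queued waiters */
} arch_rwlock_t;

With the field named wait_lock, a call site like arch_spin_lock(&lock->wait_lock) reads unambiguously as "queue onto the waiters' lock" rather than suggesting the rwlock itself is being acquired.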