@@ -233,8 +233,19 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 	waiter.type = RWSEM_WAITING_FOR_READ;
 
 	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
+	if (list_empty(&sem->wait_list)) {
+		/*
+		 * In case the wait queue is empty and the lock isn't owned
+		 * by a writer, this reader can exit the slowpath and return
+		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
+		 * been set in the count.
+		 */
+		if (atomic_long_read(&sem->count) >= 0) {
+			raw_spin_unlock_irq(&sem->wait_lock);
+			return sem;
+		}
 		adjustment += RWSEM_WAITING_BIAS;
+	}
 	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we're now waiting on the lock, but no longer actively locking */
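
The early exit works because of how the xadd rwsem encodes its state in sem->count: a reader adds RWSEM_ACTIVE_READ_BIAS before it can ever reach the slowpath, while a writer (or the presence of queued waiters) drives the count negative. So if the slowpath finds the wait list empty and the count non-negative, the reader's bias is already in place and it effectively holds the lock. The sketch below is a hypothetical userspace model of that sign convention, assuming toy TOY_* bias values and helpers rather than the kernel's RWSEM_* definitions:

/*
 * Illustrative userspace model of the count convention, not kernel code.
 * TOY_* constants and helpers are made-up stand-ins for the RWSEM_* biases.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_ACTIVE_READ_BIAS	1L		/* added once per active reader */
#define TOY_WAITING_BIAS	(-65536L)	/* set while the wait list is non-empty */
#define TOY_ACTIVE_WRITE_BIAS	(TOY_WAITING_BIAS + TOY_ACTIVE_READ_BIAS)

static atomic_long toy_count;

/* Reader fastpath: add the read bias first, then check the sign of the result. */
static bool toy_down_read_fastpath(void)
{
	long new = atomic_fetch_add(&toy_count, TOY_ACTIVE_READ_BIAS)
		   + TOY_ACTIVE_READ_BIAS;
	return new >= 0;	/* negative: a writer holds the lock or waiters are queued */
}

int main(void)
{
	/* Uncontended: the bias is already in the count, so the reader owns the lock. */
	printf("uncontended reader: %s\n",
	       toy_down_read_fastpath() ? "lock granted" : "slowpath");
	atomic_fetch_sub(&toy_count, TOY_ACTIVE_READ_BIAS);	/* toy up_read() */

	/* A writer's bias drives the count negative, so a new reader must queue. */
	atomic_fetch_add(&toy_count, TOY_ACTIVE_WRITE_BIAS);
	printf("reader vs. writer:  %s\n",
	       toy_down_read_fastpath() ? "lock granted" : "slowpath");
	return 0;
}

This mirrors the window the added check targets: the writer that forced the reader's fastpath failure may already have released the lock by the time the reader takes wait_lock, in which case queuing the waiter and waking it later would be pure overhead.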