@@ -221,8 +221,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 /*
  * Wait for the read lock to be granted
  */
-__visible
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+static inline struct rw_semaphore __sched *
+__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
 {
 	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
 	struct rwsem_waiter waiter;
@@ -255,17 +255,44 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 
 	/* wait to be given the lock */
 	while (true) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		set_current_state(state);
 		if (!waiter.task)
 			break;
+		if (signal_pending_state(state, current)) {
+			raw_spin_lock_irq(&sem->wait_lock);
+			if (waiter.task)
+				goto out_nolock;
+			raw_spin_unlock_irq(&sem->wait_lock);
+			break;
+		}
 		schedule();
 	}
 
 	__set_current_state(TASK_RUNNING);
 	return sem;
+
+out_nolock:
+	list_del(&waiter.list);
+	if (list_empty(&sem->wait_list))
+		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
+	raw_spin_unlock_irq(&sem->wait_lock);
+	__set_current_state(TASK_RUNNING);
+	return ERR_PTR(-EINTR);
+}
+
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
 
+__visible struct rw_semaphore * __sched
+rwsem_down_read_failed_killable(struct rw_semaphore *sem)
+{
+	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
+}
+EXPORT_SYMBOL(rwsem_down_read_failed_killable);
+
 /*
  * This function must be called with the sem->wait_lock held to prevent
  * race conditions between checking the rwsem wait list and setting the