@@ -419,6 +419,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	struct rt_mutex *lock;
 	bool detect_deadlock;
 	unsigned long flags;
+	bool requeue = true;
 
 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
@@ -508,18 +509,31 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 			goto out_unlock_pi;
 		/*
 		 * If deadlock detection is off, we stop here if we
-		 * are not the top pi waiter of the task.
+		 * are not the top pi waiter of the task. If deadlock
+		 * detection is enabled we continue, but stop the
+		 * requeueing in the chain walk.
 		 */
-		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-			goto out_unlock_pi;
+		if (top_waiter != task_top_pi_waiter(task)) {
+			if (!detect_deadlock)
+				goto out_unlock_pi;
+			else
+				requeue = false;
+		}
 	}
 
 	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
+	 * If the waiter priority is the same as the task priority
+	 * then there is no further priority adjustment necessary. If
+	 * deadlock detection is off, we stop the chain walk. If it's
+	 * enabled we continue, but stop the requeueing in the chain
+	 * walk.
 	 */
-	if (!detect_deadlock && waiter->prio == task->prio)
-		goto out_unlock_pi;
+	if (waiter->prio == task->prio) {
+		if (!detect_deadlock)
+			goto out_unlock_pi;
+		else
+			requeue = false;
+	}
 
 	/*
 	 * [4] Get the next lock
@@ -552,6 +566,55 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto out_unlock_pi;
 	}
 
+	/*
+	 * If we just follow the lock chain for deadlock detection, no
+	 * need to do all the requeue operations. To avoid a truckload
+	 * of conditionals around the various places below, just do the
+	 * minimum chain walk checks.
+	 */
+	if (!requeue) {
+		/*
+		 * No requeue [7] here. Just release @task [8].
+		 */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		put_task_struct(task);
+
+		/*
+		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+		 * If there is no owner of the lock, end of chain.
+		 */
+		if (!rt_mutex_owner(lock)) {
+			raw_spin_unlock(&lock->wait_lock);
+			return 0;
+		}
+
+		/* [10] Grab the next task, i.e. owner of @lock */
+		task = rt_mutex_owner(lock);
+		get_task_struct(task);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+		/*
+		 * No requeue [11] here. We just do deadlock detection.
+		 *
+		 * [12] Store whether owner is blocked itself.
+		 * Decision is made after dropping the locks.
+		 */
+		next_lock = task_blocked_on_lock(task);
+		/*
+		 * Get the top waiter for the next iteration.
+		 */
+		top_waiter = rt_mutex_top_waiter(lock);
+
+		/* [13] Drop locks */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&lock->wait_lock);
+
+		/* If owner is not blocked, end of chain. */
+		if (!next_lock)
+			goto out_put_task;
+		goto again;
+	}
+
 	/*
 	 * Store the current top waiter before doing the requeue
 	 * operation on @lock. We need it for the boost/deboost
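
To see the control flow this patch introduces in isolation, here is a minimal user-space sketch. It is not kernel code: every name in it (struct tsk, struct lk, chain_walk, requeue_and_adjust) is invented for illustration, and the real walk's handling of waiter rbtrees, lock stealing and locks vanishing under the walker is deliberately omitted. The sketch only models the decision the patch adds: once the walk can no longer change a priority, it either stops (deadlock detection off) or continues as a pure detection walk with requeueing disabled.

/*
 * Illustrative stand-ins only; not the kernel's data structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct lk;

struct tsk {
	int		prio;		/* lower value = higher priority */
	struct lk	*blocked_on;	/* lock this task waits for, or NULL */
};

struct lk {
	struct tsk	*owner;		/* NULL means the lock is free */
};

/* Stand-in for the requeue plus boost/deboost work of the real walk. */
static void requeue_and_adjust(struct tsk *t, int new_prio)
{
	t->prio = new_prio;
}

/*
 * Walk the chain on behalf of a waiter with priority @waiter_prio,
 * starting at @task (the owner of the lock the waiter blocks on).
 * Returns true if the walk gets back to @orig_task, i.e. a deadlock.
 */
static bool chain_walk(struct tsk *task, int waiter_prio,
		       bool detect_deadlock, struct tsk *orig_task)
{
	bool requeue = true;

	while (task) {
		if (task == orig_task)
			return true;	/* chain loops back: deadlock */

		if (waiter_prio == task->prio) {
			/* No further adjustment is necessary ... */
			if (!detect_deadlock)
				return false;	/* ... and no detection: stop */
			requeue = false;	/* keep walking, checks only */
		}

		if (requeue)
			requeue_and_adjust(task, waiter_prio);

		/* End of chain if the owner is not blocked itself. */
		if (!task->blocked_on)
			return false;

		/* Next task is the owner of the lock we are blocked on. */
		task = task->blocked_on->owner;
	}
	return false;			/* lock without owner: end of chain */
}

int main(void)
{
	/* Three equal-priority tasks blocked in a cycle: a -> b -> c -> a. */
	struct tsk a = { .prio = 10 }, b = { .prio = 10 }, c = { .prio = 10 };
	struct lk l1 = { .owner = &b }, l2 = { .owner = &c }, l3 = { .owner = &a };

	a.blocked_on = &l1;
	b.blocked_on = &l2;
	c.blocked_on = &l3;

	/* Detection-only mode: no priority changes, cycle still found. */
	printf("deadlock detected: %d\n", chain_walk(&b, a.prio, true, &a));
	/* Without detection the walk stops at the first equal priority. */
	printf("early stop, no deadlock reported: %d\n",
	       chain_walk(&b, a.prio, false, &a));
	return 0;
}

The requeue flag serves the same purpose in the sketch as in the patch: deadlock detection only needs to follow the owner->blocked-on links, so once no priority can change, the loop body shrinks from the full pi_lock/wait_lock requeue dance to pointer chasing plus the end-of-chain checks.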