|
@@ -917,9 +917,9 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
|
|
static void remove_waiter(struct rt_mutex *lock,
|
|
static void remove_waiter(struct rt_mutex *lock,
|
|
struct rt_mutex_waiter *waiter)
|
|
struct rt_mutex_waiter *waiter)
|
|
{
|
|
{
|
|
- int first = (waiter == rt_mutex_top_waiter(lock));
|
|
|
|
|
|
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
|
|
struct task_struct *owner = rt_mutex_owner(lock);
|
|
struct task_struct *owner = rt_mutex_owner(lock);
|
|
- struct rt_mutex *next_lock = NULL;
|
|
|
|
|
|
+ struct rt_mutex *next_lock;
|
|
unsigned long flags;
|
|
unsigned long flags;
|
|
|
|
|
|
raw_spin_lock_irqsave(&current->pi_lock, flags);
|
|
raw_spin_lock_irqsave(&current->pi_lock, flags);
|
|
@@ -927,29 +927,31 @@ static void remove_waiter(struct rt_mutex *lock,
|
|
current->pi_blocked_on = NULL;
|
|
current->pi_blocked_on = NULL;
|
|
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
|
|
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
|
|
|
|
|
|
- if (!owner)
|
|
|
|
|
|
+ /*
|
|
|
|
+ * Only update priority if the waiter was the highest priority
|
|
|
|
+ * waiter of the lock and there is an owner to update.
|
|
|
|
+ */
|
|
|
|
+ if (!owner || !is_top_waiter)
|
|
return;
|
|
return;
|
|
|
|
|
|
- if (first) {
|
|
|
|
-
|
|
|
|
- raw_spin_lock_irqsave(&owner->pi_lock, flags);
|
|
|
|
|
|
+ raw_spin_lock_irqsave(&owner->pi_lock, flags);
|
|
|
|
|
|
- rt_mutex_dequeue_pi(owner, waiter);
|
|
|
|
|
|
+ rt_mutex_dequeue_pi(owner, waiter);
|
|
|
|
|
|
- if (rt_mutex_has_waiters(lock)) {
|
|
|
|
- struct rt_mutex_waiter *next;
|
|
|
|
|
|
+ if (rt_mutex_has_waiters(lock))
|
|
|
|
+ rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
|
|
|
|
|
|
- next = rt_mutex_top_waiter(lock);
|
|
|
|
- rt_mutex_enqueue_pi(owner, next);
|
|
|
|
- }
|
|
|
|
- __rt_mutex_adjust_prio(owner);
|
|
|
|
|
|
+ __rt_mutex_adjust_prio(owner);
|
|
|
|
|
|
- /* Store the lock on which owner is blocked or NULL */
|
|
|
|
- next_lock = task_blocked_on_lock(owner);
|
|
|
|
|
|
+ /* Store the lock on which owner is blocked or NULL */
|
|
|
|
+ next_lock = task_blocked_on_lock(owner);
|
|
|
|
|
|
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
|
|
|
|
- }
|
|
|
|
|
|
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
|
|
|
|
|
|
|
|
+ /*
|
|
|
|
+ * Don't walk the chain, if the owner task is not blocked
|
|
|
|
+ * itself.
|
|
|
|
+ */
|
|
if (!next_lock)
|
|
if (!next_lock)
|
|
return;
|
|
return;
|
|
|
|
|