@@ -916,7 +916,7 @@ void exit_pi_state_list(struct task_struct *curr)
 		pi_state->owner = NULL;
 		raw_spin_unlock_irq(&curr->pi_lock);
 
-		rt_mutex_unlock(&pi_state->pi_mutex);
+		rt_mutex_futex_unlock(&pi_state->pi_mutex);
 
 		spin_unlock(&hb->lock);
 
@@ -1364,20 +1364,18 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *top_waiter
 	pi_state->owner = new_owner;
 	raw_spin_unlock(&new_owner->pi_lock);
 
-	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
-	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
-
 	/*
-	 * First unlock HB so the waiter does not spin on it once he got woken
-	 * up. Second wake up the waiter before the priority is adjusted. If we
-	 * deboost first (and lose our higher priority), then the task might get
-	 * scheduled away before the wake up can take place.
+	 * We've updated the uservalue, this unlock cannot fail.
 	 */
+	deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+
+	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(&hb->lock);
-	wake_up_q(&wake_q);
-	if (deboost)
+
+	if (deboost) {
+		wake_up_q(&wake_q);
 		rt_mutex_adjust_prio(current);
+	}
 
 	return 0;
 }
@@ -2253,7 +2251,7 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 		 * task acquired the rt_mutex after we removed ourself from the
 		 * rt_mutex waiters list.
 		 */
-		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
+		if (rt_mutex_futex_trylock(&q->pi_state->pi_mutex)) {
 			locked = 1;
 			goto out;
 		}
@@ -2568,7 +2566,7 @@ retry_private:
 	if (!trylock) {
 		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
 	} else {
-		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
+		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
 		/* Fixup the trylock return value: */
 		ret = ret ? 0 : -EWOULDBLOCK;
 	}
@@ -2591,7 +2589,7 @@ retry_private:
 	 * it and return the fault to userspace.
 	 */
 	if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
-		rt_mutex_unlock(&q.pi_state->pi_mutex);
+		rt_mutex_futex_unlock(&q.pi_state->pi_mutex);
 
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
@@ -2898,7 +2896,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		spin_lock(q.lock_ptr);
 		ret = fixup_pi_state_owner(uaddr2, &q, current);
 		if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
-			rt_mutex_unlock(&q.pi_state->pi_mutex);
+			rt_mutex_futex_unlock(&q.pi_state->pi_mutex);
 		/*
 		 * Drop the reference to the pi state which
 		 * the requeue_pi() code acquired for us.
@@ -2938,7 +2936,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		 * userspace.
 		 */
 		if (ret && rt_mutex_owner(pi_mutex) == current)
-			rt_mutex_unlock(pi_mutex);
+			rt_mutex_futex_unlock(pi_mutex);
 
 		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);