@@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
-	struct rt_mutex *pi_mutex = NULL;
 	struct futex_hash_bucket *hb;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
@@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		if (q.pi_state && (q.pi_state->owner != current)) {
 			spin_lock(q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
+			if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
+				rt_mutex_unlock(&q.pi_state->pi_mutex);
 			/*
 			 * Drop the reference to the pi state which
 			 * the requeue_pi() code acquired for us.
@@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 			spin_unlock(q.lock_ptr);
 		}
 	} else {
+		struct rt_mutex *pi_mutex;
+
 		/*
 		 * We have been woken up by futex_unlock_pi(), a timeout, or a
 		 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
@@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 		if (res)
 			ret = (res < 0) ? res : 0;
 
+		/*
+		 * If fixup_pi_state_owner() faulted and was unable to handle
+		 * the fault, unlock the rt_mutex and return the fault to
+		 * userspace.
+		 */
+		if (ret && rt_mutex_owner(pi_mutex) == current)
+			rt_mutex_unlock(pi_mutex);
+
 		/* Unqueue and drop the lock. */
 		unqueue_me_pi(&q);
 	}
 
-	/*
-	 * If fixup_pi_state_owner() faulted and was unable to handle the
-	 * fault, unlock the rt_mutex and return the fault to userspace.
-	 */
-	if (ret == -EFAULT) {
-		if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
-			rt_mutex_unlock(pi_mutex);
-	} else if (ret == -EINTR) {
+	if (ret == -EINTR) {
 		/*
 		 * We've already been requeued, but cannot restart by calling
 		 * futex_lock_pi() directly. We could restart this syscall, but
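
Note (outside the patch itself): a minimal, standalone C sketch of the cleanup rule the patch applies, assuming stand-in stub types and helpers in place of the kernel's rt_mutex API; it only illustrates that, after this change, each path that may still own the pi_mutex drops it as soon as fixup_pi_state_owner() reports an error, instead of funnelling everything through one function-wide pi_mutex pointer checked for -EFAULT at the end of the function.

	#include <stddef.h>

	/* Stand-in stubs for illustration only, NOT the kernel definitions. */
	struct rt_mutex { void *owner; };
	static void *rt_mutex_owner(struct rt_mutex *m)  { return m->owner; }
	static void  rt_mutex_unlock(struct rt_mutex *m) { m->owner = NULL; }

	/*
	 * Shape of the new error handling: if fixup_pi_state_owner() returned
	 * an error (ret != 0) and the current task still owns the rt_mutex,
	 * release it right in the branch that may have acquired it.
	 */
	static void cleanup_after_fixup(int ret, struct rt_mutex *pi_mutex,
					void *current_task)
	{
		if (ret && rt_mutex_owner(pi_mutex) == current_task)
			rt_mutex_unlock(pi_mutex);
	}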