@@ -2099,20 +2099,7 @@ queue_unlock(struct futex_hash_bucket *hb)
 	hb_waiters_dec(hb);
 }
 
-/**
- * queue_me() - Enqueue the futex_q on the futex_hash_bucket
- * @q:	The futex_q to enqueue
- * @hb:	The destination hash bucket
- *
- * The hb->lock must be held by the caller, and is released here. A call to
- * queue_me() is typically paired with exactly one call to unqueue_me(). The
- * exceptions involve the PI related operations, which may use unqueue_me_pi()
- * or nothing if the unqueue is done as part of the wake process and the unqueue
- * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
- * an example).
- */
-static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
-	__releases(&hb->lock)
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
 
@@ -2129,6 +2116,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 	plist_node_init(&q->list, prio);
 	plist_add(&q->list, &hb->chain);
 	q->task = current;
+}
+
+/**
+ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+ * @q:	The futex_q to enqueue
+ * @hb:	The destination hash bucket
+ *
+ * The hb->lock must be held by the caller, and is released here. A call to
+ * queue_me() is typically paired with exactly one call to unqueue_me(). The
+ * exceptions involve the PI related operations, which may use unqueue_me_pi()
+ * or nothing if the unqueue is done as part of the wake process and the unqueue
+ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+ * an example).
+ */
+static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
+{
+	__queue_me(q, hb);
 	spin_unlock(&hb->lock);
 }
 
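For reference, the queue_me()/unqueue_me() pairing that the kerneldoc above describes looks roughly like the sketch below. This is illustrative only and not part of the patch: futex_wait_sketch() is a made-up name, and the real counterpart is the futex wait path in kernel/futex.c with the timeout and restart handling omitted here.

/*
 * Sketch: the typical queue_me()/unqueue_me() pairing on the wait side.
 */
static int futex_wait_sketch(struct futex_q *q, struct futex_hash_bucket *hb)
{
	/* Set the task state before queueing so a concurrent wakeup is not lost. */
	set_current_state(TASK_INTERRUPTIBLE);
	queue_me(q, hb);			/* enqueue on hb->chain and drop hb->lock */

	if (likely(!plist_node_empty(&q->list)))
		schedule();			/* sleep until futex_wake() dequeues us */
	__set_current_state(TASK_RUNNING);

	/* Nonzero if we were still queued (timeout/signal), 0 if already woken. */
	return unqueue_me(q);
}

The split into __queue_me() exists so that futex_lock_pi() below can enqueue the futex_q while keeping hb->lock held across rt_mutex_start_proxy_lock(); plain queue_me() keeps its old behaviour of dropping the lock.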
@@ -2587,6 +2592,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 {
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct futex_pi_state *pi_state = NULL;
+	struct rt_mutex_waiter rt_waiter;
 	struct futex_hash_bucket *hb;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -2639,24 +2645,51 @@ retry_private:
 		}
 	}
 
+	WARN_ON(!q.pi_state);
+
 	/*
 	 * Only actually queue now that the atomic ops are done:
 	 */
-	queue_me(&q, hb);
+	__queue_me(&q, hb);
 
-	WARN_ON(!q.pi_state);
-	/*
-	 * Block on the PI mutex:
-	 */
-	if (!trylock) {
-		ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
-	} else {
+	if (trylock) {
 		ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
 		/* Fixup the trylock return value: */
 		ret = ret ? 0 : -EWOULDBLOCK;
+		goto no_block;
+	}
+
+	/*
+	 * We must add ourselves to the rt_mutex waitlist while holding hb->lock
+	 * such that the hb and rt_mutex wait lists match.
+	 */
+	rt_mutex_init_waiter(&rt_waiter);
+	ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
+	if (ret) {
+		if (ret == 1)
+			ret = 0;
+
+		goto no_block;
 	}
 
+	spin_unlock(q.lock_ptr);
+
+	if (unlikely(to))
+		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
+
+	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+
 	spin_lock(q.lock_ptr);
+	/*
+	 * If we failed to acquire the lock (signal/timeout), we must
+	 * first acquire the hb->lock before removing the lock from the
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
+	 * wait lists consistent.
+	 */
+	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
+		ret = 0;
+
+no_block:
 	/*
 	 * Fixup the pi_state owner and possibly acquire the lock if we
 	 * haven't already.
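Condensed, and with the timer arming and error handling stripped, the reworked blocking path above follows the lock-ordering sketch below. It is illustrative rather than literal patch text; the return-value comments reflect the kernel's rt_mutex_*_proxy_lock() helpers.

	rt_mutex_init_waiter(&rt_waiter);
	/*
	 * Enqueue on the rt_mutex wait list while hb->lock is still held,
	 * so the hb->chain and rt_mutex wait lists cannot get out of sync.
	 */
	ret = rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
	if (ret) {
		if (ret == 1)	/* 1 means the lock was acquired without blocking */
			ret = 0;
		goto no_block;
	}

	spin_unlock(q.lock_ptr);	/* drop hb->lock only while actually blocking */
	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
	spin_lock(q.lock_ptr);		/* retake hb->lock before touching the waiter */

	/*
	 * On failure, remove the waiter under hb->lock; if the cleanup finds
	 * that the lock was acquired after all, the failure is discarded.
	 */
	if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
		ret = 0;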