@@ -102,6 +102,36 @@ init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
 	q->func = func;
 }
 
+/**
+ * waitqueue_active -- locklessly test for waiters on the queue
+ * @q: the waitqueue to test for waiters
+ *
+ * Returns true if the wait list is not empty.
+ *
+ * NOTE: this function is lockless and requires care; incorrect usage _will_
+ * lead to sporadic and non-obvious failure.
+ *
+ * Use it either while holding wait_queue_head_t::lock, or for wakeups with
+ * an extra smp_mb(), like:
+ *
+ *      CPU0 - waker                    CPU1 - waiter
+ *
+ *                                      for (;;) {
+ *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
+ *      smp_mb();                         // smp_mb() from set_current_state()
+ *      if (waitqueue_active(wq))         if (@cond)
+ *        wake_up(wq);                      break;
+ *                                        schedule();
+ *                                      }
+ *                                      finish_wait(&wq, &wait);
+ *
+ * This is because without the explicit smp_mb() it's possible for the
+ * waitqueue_active() load to get hoisted over the @cond store, such that
+ * we'll observe an empty wait list while the waiter might not observe @cond.
+ *
+ * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
+ * and (when the lock is uncontended) the two are of roughly equal cost.
+ */
 static inline int waitqueue_active(wait_queue_head_t *q)
 {
 	return !list_empty(&q->task_list);
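
For reference, a minimal sketch of the waker/waiter pattern the new comment
describes, written out as a concrete pair of functions. The names here
(my_queue, data_ready, producer, consumer) are hypothetical and exist only to
illustrate where the explicit smp_mb() goes:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_queue);
static bool data_ready;

/* Waker: publish the condition first, then order that store against the
 * subsequent waitqueue_active() load with an explicit smp_mb(). */
static void producer(void)
{
	data_ready = true;
	smp_mb();	/* pairs with smp_mb() in set_current_state() */
	if (waitqueue_active(&my_queue))
		wake_up(&my_queue);
}

/* Waiter: prepare_to_wait() sets the task state via set_current_state(),
 * which supplies the matching barrier before the condition is re-checked. */
static void consumer(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_queue, &wait, TASK_UNINTERRUPTIBLE);
		if (data_ready)
			break;
		schedule();
	}
	finish_wait(&my_queue, &wait);
}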