@@ -412,8 +412,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 * its already queued (either by us or someone else) and will get the
 * wakeup due to that.
 *
- * This cmpxchg() implies a full barrier, which pairs with the write
- * barrier implied by the wakeup in wake_up_q().
+ * This cmpxchg() executes a full barrier, which pairs with the full
+ * barrier executed by the wakeup in wake_up_q().
 */
 if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
 return;
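As a rough illustration of what the "full barrier pairs with full barrier" wording buys, here is a minimal userspace C11 sketch, not kernel code and not the wake_q protocol itself: the seq_cst fences stand in for the cmpxchg() and wakeup barriers, and all names (x, y, side_a, side_b) are invented for the example. With both fences in place, the store-buffering outcome in which each thread misses the other's store is forbidden.

/*
 * Userspace model only: each thread publishes a flag, executes a full
 * barrier, then checks the other thread's flag.  With full barriers on
 * both sides, at least one thread is guaranteed to see the other's
 * store; that is the property the cmpxchg()/wake_up_q() pairing relies on.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;         /* both start at 0 */
static int r0, r1;

static void *side_a(void *arg)
{
        (void)arg;
        atomic_store_explicit(&x, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        r0 = atomic_load_explicit(&y, memory_order_relaxed);
        return NULL;
}

static void *side_b(void *arg)
{
        (void)arg;
        atomic_store_explicit(&y, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* full barrier */
        r1 = atomic_load_explicit(&x, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, side_a, NULL);
        pthread_create(&b, NULL, side_b, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* r0 == 0 && r1 == 0 is impossible; drop either fence and it is not. */
        printf("r0=%d r1=%d\n", r0, r1);
        return 0;
}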
@@ -441,8 +441,8 @@ void wake_up_q(struct wake_q_head *head)
 task->wake_q.next = NULL;

 /*
- * wake_up_process() implies a wmb() to pair with the queueing
- * in wake_q_add() so as not to miss wakeups.
+ * wake_up_process() executes a full barrier, which pairs with
+ * the queueing in wake_q_add() so as not to miss wakeups.
 */
 wake_up_process(task);
 put_task_struct(task);
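Following on from the litmus above, here is a slightly larger userspace sketch of how that pairing prevents a missed wakeup end to end. It is my own model rather than kernel code: state stands in for task->state, cond for the caller's condition, and the seq_cst fences play the roles of the barriers in wake_q_add()/wake_up_process(). Either the waker observes the sleeping state and wakes the task, or the sleeper observes the condition and never parks.

/*
 * Userspace model only.  The sleeper marks itself sleeping, then checks
 * the condition; the waker sets the condition, then checks whether the
 * sleeper needs waking.  The two full barriers make "both sides miss
 * each other" impossible, so the spin-wait below always terminates.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RUNNING  0
#define SLEEPING 1

static atomic_int state;        /* starts RUNNING (0), models task->state */
static atomic_int cond;         /* models the caller's CONDITION */

static void *sleeper(void *arg)
{
        (void)arg;
        atomic_store_explicit(&state, SLEEPING, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* set_current_state() barrier */
        if (!atomic_load_explicit(&cond, memory_order_relaxed)) {
                /* "schedule()": park until someone flips us back to RUNNING */
                while (atomic_load_explicit(&state, memory_order_acquire) == SLEEPING)
                        ;
        }
        return NULL;
}

static void *waker(void *arg)
{
        (void)arg;
        atomic_store_explicit(&cond, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* wake_up_process() barrier */
        if (atomic_load_explicit(&state, memory_order_relaxed) == SLEEPING)
                atomic_store_explicit(&state, RUNNING, memory_order_release); /* wake */
        return NULL;
}

int main(void)
{
        pthread_t s, w;

        pthread_create(&s, NULL, sleeper, NULL);
        pthread_create(&w, NULL, waker, NULL);
        pthread_join(s, NULL);
        pthread_join(w, NULL);
        puts("no missed wakeup");
        return 0;
}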
@@ -1879,8 +1879,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 * rq(c1)->lock (if not at the same time, then in that order).
 * C) LOCK of the rq(c1)->lock scheduling in task
 *
- * Transitivity guarantees that B happens after A and C after B.
- * Note: we only require RCpc transitivity.
+ * Release/acquire chaining guarantees that B happens after A and C after B.
 * Note: the CPU doing B need not be c0 or c1
 *
 * Example:
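The "release/acquire chaining" wording can be read off a small userspace C11 sketch, again not from the patch: every lock hand-off is modeled by a release store that the next CPU reads with an acquire load, and chaining two such hand-offs already guarantees that what A wrote is visible after C. The names (data, lock_ab, lock_bc, t0/t1/t2) are invented for the example.

/*
 * Userspace model only: t0's store happens-before t1's work (A -> B) and
 * t1's before t2's (B -> C) because each "unlock" (release store) is read
 * by the next "lock" (acquire load), so at the end of the chain t2 is
 * guaranteed to see t0's write, just as rq lock chaining orders A, B, C.
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static int data;                        /* written by A, read after C */
static atomic_int lock_ab, lock_bc;     /* model the two lock hand-offs */

static void *t0(void *arg)              /* A */
{
        (void)arg;
        data = 1;
        atomic_store_explicit(&lock_ab, 1, memory_order_release);  /* "UNLOCK" */
        return NULL;
}

static void *t1(void *arg)              /* B */
{
        (void)arg;
        while (!atomic_load_explicit(&lock_ab, memory_order_acquire))  /* "LOCK" */
                ;
        atomic_store_explicit(&lock_bc, 1, memory_order_release);  /* "UNLOCK" */
        return NULL;
}

static void *t2(void *arg)              /* C */
{
        (void)arg;
        while (!atomic_load_explicit(&lock_bc, memory_order_acquire))  /* "LOCK" */
                ;
        assert(data == 1);      /* guaranteed by the chained release/acquire pairs */
        return NULL;
}

int main(void)
{
        pthread_t a, b, c;

        pthread_create(&a, NULL, t0, NULL);
        pthread_create(&b, NULL, t1, NULL);
        pthread_create(&c, NULL, t2, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        pthread_join(c, NULL);
        return 0;
}

Nothing here needs a full barrier; the chain of release/acquire pairs is enough, which is all the migration argument above relies on.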
@@ -1942,16 +1941,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 * UNLOCK rq(0)->lock
 *
 *
- * However; for wakeups there is a second guarantee we must provide, namely we
- * must observe the state that lead to our wakeup. That is, not only must our
- * task observe its own prior state, it must also observe the stores prior to
- * its wakeup.
- *
- * This means that any means of doing remote wakeups must order the CPU doing
- * the wakeup against the CPU the task is going to end up running on. This,
- * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire).
- *
+ * However, for wakeups there is a second guarantee we must provide, namely we
+ * must ensure that CONDITION=1 done by the caller can not be reordered with
+ * accesses to the task state; see try_to_wake_up() and set_current_state().
 */

 /**
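The idiom this new wording points at is the usual wait loop paired with a wake, sketched below in kernel style, paraphrasing the set_current_state() documentation; CONDITION and p are placeholders for whatever the caller really uses, not symbols from this patch. Each side separates its store from its load with a full barrier, so either the waker sees the sleeping state, or the sleeper sees CONDITION set and never blocks.

/* sleeper: see set_current_state() */
for (;;) {
        set_current_state(TASK_UNINTERRUPTIBLE); /* store ->state, full barrier */
        if (CONDITION)                           /* ...then test the condition  */
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);

/* waker: see try_to_wake_up() */
CONDITION = 1;                                   /* store the condition...          */
wake_up_process(p);                              /* full barrier, then test ->state */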
@@ -1967,6 +1959,9 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 * Atomic against schedule() which would dequeue a task, also see
 * set_current_state().
 *
+ * This function executes a full memory barrier before accessing the task
+ * state; see set_current_state().
+ *
 * Return: %true if @p->state changes (an actual wakeup was done),
 * %false otherwise.
 */
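For reference, and not something this patch touches: in try_to_wake_up() of this era the promised barrier comes from the smp_mb__after_spinlock() issued right after ->pi_lock is taken, roughly as in the abridged entry sequence below.

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        smp_mb__after_spinlock();       /* the full barrier documented above */
        if (!(p->state & state))        /* first access of the task state    */
                goto out;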
@@ -2141,8 +2136,7 @@ out:
 *
 * Return: 1 if the process was woken up, 0 if it was already running.
 *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
+ * This function executes a full memory barrier before accessing the task state.
 */
 int wake_up_process(struct task_struct *p)
 {
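wake_up_process() itself is a one-line wrapper, so the guarantee documented here is simply inherited from try_to_wake_up(); its body (unchanged by this patch) is:

int wake_up_process(struct task_struct *p)
{
        return try_to_wake_up(p, TASK_NORMAL, 0);
}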