@@ -112,17 +112,36 @@ struct task_group;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state)					\
+	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
 #define __set_current_state(state_value)				\
 	do {								\
+		WARN_ON_ONCE(is_special_task_state(state_value));	\
 		current->task_state_change = _THIS_IP_;			\
 		current->state = (state_value);				\
 	} while (0)
+
 #define set_current_state(state_value)					\
 	do {								\
+		WARN_ON_ONCE(is_special_task_state(state_value));	\
 		current->task_state_change = _THIS_IP_;			\
 		smp_store_mb(current->state, (state_value));		\
 	} while (0)
 
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		WARN_ON_ONCE(!is_special_task_state(state_value));	\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->task_state_change = _THIS_IP_;			\
+		current->state = (state_value);				\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
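The context elided between these two hunks is the remainder of this comment; it
documents the canonical wait-loop that set_current_state() serves. Roughly, with
need_sleep standing in for the caller's sleep condition, the sleeper side is:

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!need_sleep)
			break;

		schedule();
	}
	__set_current_state(TASK_RUNNING);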
@@ -144,8 +163,8 @@ struct task_group;
  *
  * The above is typically ordered against the wakeup, which does:
  *
- *   need_sleep = false;
- *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *	need_sleep = false;
+ *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
  * Where wake_up_state() (and all other wakeup primitives) imply enough
  * barriers to order the store of the variable against wakeup.
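To illustrate the ordering claim above, and the collision discussion the next
hunk adds, here is one possible interleaving (a sketch, reusing the need_sleep
example from the comment):

	sleeper					waker
	set_current_state(TASK_UNINTERRUPTIBLE);
						need_sleep = false;
						wake_up_state(p, TASK_UNINTERRUPTIBLE);
						  /* may store p->state = TASK_RUNNING */
	if (!need_sleep)	/* observes need_sleep == false */
		break;
	__set_current_state(TASK_RUNNING);

Whichever of the racing p->state stores lands last, the sleeper still observes
need_sleep == false and leaves the loop, at worst after one extra pass; the
paired barriers ensure the sleeper cannot both miss the need_sleep store and
stay asleep.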
@@ -154,12 +173,33 @@ struct task_group;
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value)	smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value)				\
+	current->state = (state_value)
+
+#define set_current_state(state_value)					\
+	smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * cannot use the regular condition-based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING
+ * stores will not collide with our state change.
+ */
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->state = (state_value);				\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
+
 #endif
 
 /* Task command name length: */
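For illustration, roughly how a special state is entered after this change,
modeled on the TASK_DEAD path this series converts (a sketch of do_task_dead(),
not the verbatim kernel source):

	void __noreturn do_task_dead(void)
	{
		/*
		 * TASK_DEAD has no wait-loop to fall back on: a racing
		 * wakeup's TASK_RUNNING store must not overwrite it, so
		 * set_special_state() serializes the store under pi_lock.
		 */
		set_special_state(TASK_DEAD);

		current->flags |= PF_NOFREEZE;	/* tell the freezer to ignore us */

		__schedule(false);		/* does not return */
		BUG();
	}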