@@ -920,6 +920,50 @@ enum cpu_idle_type {
 #define SCHED_CAPACITY_SHIFT	10
 #define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)
 
+/*
+ * Wake-queues are lists of tasks with a pending wakeup, whose
+ * callers have already marked the task as woken internally,
+ * and can thus carry on. A common use case is being able to
+ * do the wakeups once the corresponding user lock has been
+ * released.
+ *
+ * We hold a reference to each task in the list across the wakeup,
+ * thus guaranteeing that the memory is still valid by the time
+ * the actual wakeups are performed in wake_up_q().
+ *
+ * One per task suffices, because there's never a need for a task to be
+ * in two wake queues simultaneously; it is forbidden to abandon a task
+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
+ * already in a wake queue, the wakeup will happen soon and the second
+ * waker can just skip it.
+ *
+ * The WAKE_Q macro declares and initializes the list head.
+ * wake_up_q() does NOT reinitialize the list; it's expected to be
+ * called near the end of a function, where the fact that the queue is
+ * not used again will be easy to see by inspection.
+ *
+ * Note that this can cause spurious wakeups. schedule() callers
+ * must ensure the call is done inside a loop, confirming that the
+ * wakeup condition has in fact occurred.
+ */
+struct wake_q_node {
+	struct wake_q_node *next;
+};
+
+struct wake_q_head {
+	struct wake_q_node *first;
+	struct wake_q_node **lastp;
+};
+
+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
+
+#define WAKE_Q(name)					\
+	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+extern void wake_q_add(struct wake_q_head *head,
+		       struct task_struct *task);
+extern void wake_up_q(struct wake_q_head *head);
+
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
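As an illustration only (not part of the patch), here is a minimal sketch of the waker-side pattern the comment above describes: waiters are collected onto an on-stack wake queue while a lock is held, and the actual wakeups are issued only after the lock has been dropped. The 'struct foo' / 'struct foo_waiter' types, their fields and foo_release() are hypothetical; only WAKE_Q, wake_q_add() and wake_up_q() come from the declarations added above.

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical object with a spinlock-protected list of waiters. */
struct foo_waiter {
	struct list_head list;
	struct task_struct *task;
};

struct foo {
	spinlock_t lock;
	struct list_head waiters;
};

static void foo_release(struct foo *f)
{
	WAKE_Q(wake_q);		/* on-stack head: { WAKE_Q_TAIL, &wake_q.first } */
	struct foo_waiter *w, *tmp;

	spin_lock(&f->lock);
	list_for_each_entry_safe(w, tmp, &f->waiters, list) {
		list_del_init(&w->list);
		/* Queue the wakeup; the task stays pinned until wake_up_q(). */
		wake_q_add(&wake_q, w->task);
	}
	spin_unlock(&f->lock);

	/* Do the actual wakeups only after the lock has been released. */
	wake_up_q(&wake_q);
}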
@@ -1532,6 +1576,8 @@ struct task_struct {
 	/* Protection of the PI data structures: */
 	raw_spinlock_t pi_lock;
 
+	struct wake_q_node wake_q;
+
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task */
 	struct rb_root pi_waiters;
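Continuing the hypothetical example after the first hunk, a sketch of the waiter side. Because a queued wakeup may be spurious, the comment in the first hunk requires the schedule() call to sit in a loop that re-checks the wakeup condition; here the condition is simply "foo_release() has taken us off the waiter list". foo_wait() and the condition check are made up for illustration.

static void foo_wait(struct foo *f, struct foo_waiter *w)
{
	w->task = current;

	spin_lock(&f->lock);
	list_add_tail(&w->list, &f->waiters);
	spin_unlock(&f->lock);

	for (;;) {
		/* Set the sleep state before re-checking the condition. */
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock(&f->lock);
		if (list_empty(&w->list)) {	/* dequeued by foo_release() */
			spin_unlock(&f->lock);
			break;
		}
		spin_unlock(&f->lock);

		/* May return on a spurious wakeup; the loop re-checks. */
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}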