|
@@ -14,6 +14,7 @@
|
|
|
#include "cpuacct.h"
|
|
|
|
|
|
struct rq;
|
|
|
+struct cpuidle_state;
|
|
|
|
|
|
/* task_struct::on_rq states: */
|
|
|
#define TASK_ON_RQ_QUEUED 1
|
|
@@ -643,6 +644,11 @@ struct rq {
|
|
|
#ifdef CONFIG_SMP
|
|
|
struct llist_head wake_list;
|
|
|
#endif
|
|
|
+
|
|
|
+#ifdef CONFIG_CPU_IDLE
|
|
|
+	/* Must only be inspected within an RCU read-side critical section */
|
|
|
+ struct cpuidle_state *idle_state;
|
|
|
+#endif
|
|
|
};
|
|
|
|
|
|
static inline int cpu_of(struct rq *rq)
|
|
@@ -1196,6 +1202,30 @@ static inline void idle_exit_fair(struct rq *rq) { }
|
|
|
|
|
|
#endif
|
|
|
|
|
|
+#ifdef CONFIG_CPU_IDLE
|
|
|
+static inline void idle_set_state(struct rq *rq,
|
|
|
+ struct cpuidle_state *idle_state)
|
|
|
+{
|
|
|
+ rq->idle_state = idle_state;
|
|
|
+}
|
|
|
+
|
|
|
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
|
|
|
+{
|
|
|
+ WARN_ON(!rcu_read_lock_held());
|
|
|
+ return rq->idle_state;
|
|
|
+}
|
|
|
+#else
|
|
|
+static inline void idle_set_state(struct rq *rq,
|
|
|
+ struct cpuidle_state *idle_state)
|
|
|
+{
|
|
|
+}
|
|
|
+
|
|
|
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
|
|
|
+{
|
|
|
+ return NULL;
|
|
|
+}
|
|
|
+#endif
|
|
|
+
|
|
|
extern void sysrq_sched_debug_show(void);
|
|
|
extern void sched_init_granularity(void);
|
|
|
extern void update_max_interval(void);
|