@@ -231,6 +231,12 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 static int pull_rt_task(struct rq *this_rq);
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -317,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -361,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+	return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+	return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
+
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1332,11 +1360,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
-#ifdef CONFIG_SMP
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
+	if (need_pull_rt_task(rq, prev))
 		pull_rt_task(rq);
-#endif
 
 	if (!rt_rq->rt_nr_running)
 		return NULL;
@@ -1352,13 +1377,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
-#endif
+	set_post_schedule(rq);
 
 	return p;
 }
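
An aside on the idiom, not part of the patch itself: the change moves SMP-only logic out of #ifdef CONFIG_SMP blocks at the call sites and into static inline helpers that have empty stubs on UP builds, so pick_next_task_rt() reads the same either way and the compiler discards the no-op calls. The compilable toy below sketches that pattern under stated assumptions; struct runq, struct task, and the values in main() are made-up stand-ins for illustration, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the scheduler types; illustrative names only. */
struct task { int prio; };		/* lower value = higher RT priority */
struct runq { int highest_prio; int post_schedule; };

#ifdef CONFIG_SMP
/* SMP build: the helpers carry the real logic. */
static inline bool need_pull_rt_task(struct runq *rq, struct task *prev)
{
	/* Pull when the best queued prio is now worse than prev's. */
	return rq->highest_prio > prev->prio;
}

static inline void set_post_schedule(struct runq *rq)
{
	rq->post_schedule = 1;
}
#else
/* UP build: empty stubs; the calls below compile to nothing. */
static inline bool need_pull_rt_task(struct runq *rq, struct task *prev)
{
	return false;
}

static inline void set_post_schedule(struct runq *rq)
{
}
#endif

int main(void)
{
	struct runq rq = { .highest_prio = 90, .post_schedule = 0 };
	struct task prev = { .prio = 10 };

	/* The call site is identical on SMP and UP builds: no #ifdef. */
	if (need_pull_rt_task(&rq, &prev))
		puts("would pull an RT task here");
	set_post_schedule(&rq);

	return 0;
}

Building with cc -DCONFIG_SMP toy.c selects the real helpers, plain cc toy.c the stubs; on UP there is no other runqueue to pull from, so the stubs preserve exactly what the removed #ifdef blocks achieved while keeping the hot path readable.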