@@ -229,6 +229,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
 static inline int rt_overloaded(struct rq *rq)
 {
 	return atomic_read(&rq->rd->rto_count);
@@ -1330,6 +1332,12 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	struct task_struct *p;
 	struct rt_rq *rt_rq = &rq->rt;
 
+#ifdef CONFIG_SMP
+	/* Try to pull RT tasks here if we lower this rq's prio */
+	if (rq->rt.highest_prio.curr > prev->prio)
+		pull_rt_task(rq);
+#endif
+
 	if (!rt_rq->rt_nr_running)
 		return NULL;
 
@@ -1721,13 +1729,6 @@ skip:
 	return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-	/* Try to pull RT tasks here if we lower this rq's prio */
-	if (rq->rt.highest_prio.curr > prev->prio)
-		pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
 	push_rt_tasks(rq);
@@ -2004,7 +2005,6 @@ const struct sched_class rt_sched_class = {
 	.set_cpus_allowed	= set_cpus_allowed_rt,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
-	.pre_schedule		= pre_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,