@@ -354,13 +354,16 @@ static inline int has_pushable_tasks(struct rq *rq)
         return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static inline void set_post_schedule(struct rq *rq)
+static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+
+static void push_rt_tasks(struct rq *);
+
+static inline void queue_push_tasks(struct rq *rq)
 {
-        /*
-         * We detect this state here so that we can avoid taking the RQ
-         * lock again later if there is no need to push
-         */
-        rq->post_schedule = has_pushable_tasks(rq);
+        if (!has_pushable_tasks(rq))
+                return;
+
+        queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
 }
 
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
@@ -417,7 +420,7 @@ static inline int pull_rt_task(struct rq *this_rq)
         return 0;
 }
 
-static inline void set_post_schedule(struct rq *rq)
+static inline void queue_push_tasks(struct rq *rq)
 {
 }
 #endif /* CONFIG_SMP */
@@ -1497,7 +1500,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
         /* The running task is never eligible for pushing */
         dequeue_pushable_task(rq, p);
 
-        set_post_schedule(rq);
+        queue_push_tasks(rq);
 
         return p;
 }
@@ -2042,11 +2045,6 @@ skip:
         return ret;
 }
 
-static void post_schedule_rt(struct rq *rq)
-{
-        push_rt_tasks(rq);
-}
-
 /*
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
@@ -2318,7 +2316,6 @@ const struct sched_class rt_sched_class = {
         .set_cpus_allowed       = set_cpus_allowed_rt,
         .rq_online              = rq_online_rt,
         .rq_offline             = rq_offline_rt,
-        .post_schedule          = post_schedule_rt,
         .task_woken             = task_woken_rt,
         .switched_from          = switched_from_rt,
 #endif
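
Note on context: queue_push_tasks() above relies on queue_balance_callback(), which is not defined in this file. In this series that helper is expected to live in kernel/sched/sched.h and simply chains the per-CPU callback_head onto rq->balance_callback while the rq lock is held, so the core scheduler can invoke push_rt_tasks() once the lock is dropped. A rough sketch of what that companion helper looks like (this is the assumed shape of the sched.h change, not part of the hunks above):

/*
 * Queue a balance callback on this runqueue; runs func(rq) after the
 * scheduler drops rq->lock. Assumed companion helper in kernel/sched/sched.h.
 */
static inline void
queue_balance_callback(struct rq *rq,
                       struct callback_head *head,
                       void (*func)(struct rq *rq))
{
        lockdep_assert_held(&rq->lock);

        /* Already queued on this rq's callback list; nothing to do. */
        if (unlikely(head->next))
                return;

        head->func = (void (*)(struct callback_head *))func;
        head->next = rq->balance_callback;
        rq->balance_callback = head;
}

Because the callback_head is per CPU and re-queueing is detected via head->next, queue_push_tasks() can be called repeatedly from pick_next_task_rt() without growing the list.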