@@ -3273,10 +3273,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those lose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
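
For context, here is a small self-contained C sketch of the guard this hunk introduces. The struct layouts, the class objects, and the helper name (can_take_fair_fast_path) are simplified stand-ins made up for illustration, not the kernel's real definitions; only the boolean condition mirrors the patch.

/*
 * Minimal userspace sketch of the fast-path check above, under the
 * assumption of simplified stand-in types (not the kernel's own).
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_class { const char *name; };

static const struct sched_class idle_sched_class = { "idle" };
static const struct sched_class fair_sched_class = { "fair" };
static const struct sched_class rt_sched_class   = { "rt"   };

struct task { const struct sched_class *sched_class; };

struct rq {
	unsigned int nr_running;       /* all runnable tasks on this CPU   */
	unsigned int cfs_h_nr_running; /* runnable tasks in the fair class */
};

/*
 * Mirrors the patched condition: take the CFS shortcut only when every
 * runnable task is fair AND @prev wasn't from a higher class, so that
 * higher classes keep their chance to pull work from other CPUs.
 */
static bool can_take_fair_fast_path(const struct rq *rq,
				    const struct task *prev)
{
	return (prev->sched_class == &idle_sched_class ||
		prev->sched_class == &fair_sched_class) &&
	       rq->nr_running == rq->cfs_h_nr_running;
}

int main(void)
{
	struct rq rq = { .nr_running = 3, .cfs_h_nr_running = 3 };
	struct task prev_rt   = { &rt_sched_class   };
	struct task prev_fair = { &fair_sched_class };

	/*
	 * An RT @prev blocks the shortcut even though all runnable tasks
	 * are fair: the RT class must still get its pick/balance pass.
	 */
	printf("prev=rt:   %d\n", can_take_fair_fast_path(&rq, &prev_rt));
	printf("prev=fair: %d\n", can_take_fair_fast_path(&rq, &prev_fair));
	return 0;
}

With prev_rt the helper returns 0 and the slow path (walking every scheduling class) would run; with prev_fair it returns 1 and the fair class can be asked for the next task directly.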