@@ -4681,6 +4681,22 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static const struct sched_class fake_sched_class = {
+	.put_prev_task = put_prev_task_fake,
+};
+
+static struct task_struct fake_task = {
+	/*
+	 * Avoid pull_{rt,dl}_task()
+	 */
+	.prio = MAX_PRIO + 1,
+	.sched_class = &fake_sched_class,
+};
+
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -4721,7 +4737,7 @@ static void migrate_tasks(unsigned int dead_cpu)
 		if (rq->nr_running == 1)
 			break;
 
-		next = pick_next_task(rq, NULL);
+		next = pick_next_task(rq, &fake_task);
 		BUG_ON(!next);
 		next->sched_class->put_prev_task(rq, next);
 