@@ -665,6 +665,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #ifdef CONFIG_SMP
+static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 static inline void __update_task_entity_contrib(struct sched_entity *se);
@@ -1257,6 +1258,13 @@ balance:
 	if (load_too_imbalanced(src_load, dst_load, env))
 		goto unlock;
 
+	/*
+	 * One idle CPU per node is evaluated for a task numa move.
+	 * Call select_idle_sibling to maybe find a better one.
+	 */
+	if (!cur)
+		env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
+
 assign:
 	task_numa_assign(env, cur, imp);
 unlock:
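
The hunk above only changes which CPU the task lands on when there is no task on the
candidate destination CPU to swap with (!cur): instead of keeping the single idle CPU
that was evaluated for the node, it asks select_idle_sibling() for a possibly better
nearby idle CPU. The stand-alone C sketch below models that decision in user space;
pick_idle_sibling(), struct cpu_info and the topology set up in main() are invented
for illustration and are not the kernel's select_idle_sibling() implementation.

/*
 * Simplified user-space model of the CPU-selection idea in the patch.
 * All names and the topology are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct cpu_info {
	int  llc_id;	/* CPUs with the same llc_id share a last-level cache */
	bool idle;	/* no task currently running on this CPU */
};

static struct cpu_info cpus[NR_CPUS];

/*
 * Crude stand-in for select_idle_sibling(): keep the target if it is idle,
 * otherwise prefer an idle CPU that shares a cache with it, otherwise fall
 * back to the target.
 */
static int pick_idle_sibling(int target)
{
	if (cpus[target].idle)
		return target;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu != target &&
		    cpus[cpu].llc_id == cpus[target].llc_id &&
		    cpus[cpu].idle)
			return cpu;
	}
	return target;
}

int main(void)
{
	/* Two 4-CPU LLC domains; only CPU 3 in the first LLC is idle. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus[cpu].llc_id = cpu / 4;
		cpus[cpu].idle   = false;
	}
	cpus[3].idle = true;

	int dst_cpu = 2;		/* pre-selected candidate CPU */
	bool have_swap_task = false;	/* plays the role of "cur" above */

	/* Mirrors the "if (!cur)" guard added by the patch. */
	if (!have_swap_task)
		dst_cpu = pick_idle_sibling(dst_cpu);

	printf("chosen destination CPU: %d\n", dst_cpu);	/* prints 3 */
	return 0;
}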