@@ -5055,7 +5055,19 @@ static int select_idle_sibling(struct task_struct *p, int target)
 		return i;
 
 	/*
-	 * Otherwise, iterate the domains and find an elegible idle cpu.
+	 * Otherwise, iterate the domains and find an eligible idle cpu.
+	 *
+	 * A completely idle sched group at higher domains is more
+	 * desirable than an idle group at a lower level, because lower
+	 * domains have smaller groups and usually share hardware
+	 * resources, which causes tasks to contend on them, e.g. x86
+	 * hyperthread siblings in the lowest domain (SMT) can contend
+	 * on the shared cpu pipeline.
+	 *
+	 * However, while we prefer idle groups at higher domains,
+	 * finding an idle cpu at the lowest domain is still better than
+	 * returning 'target', which we've already established isn't
+	 * idle.
 	 */
 	sd = rcu_dereference(per_cpu(sd_llc, target));
 	for_each_lower_domain(sd) {
@@ -5065,11 +5077,16 @@ static int select_idle_sibling(struct task_struct *p, int target)
 						tsk_cpus_allowed(p)))
 				goto next;
 
+			/* Ensure the entire group is idle */
 			for_each_cpu(i, sched_group_cpus(sg)) {
 				if (i == target || !idle_cpu(i))
 					goto next;
 			}
 
+			/*
+			 * It doesn't matter which cpu we pick; the
+			 * whole group is idle.
+			 */
 			target = cpumask_first_and(sched_group_cpus(sg),
 					tsk_cpus_allowed(p));
 			goto done;
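
To make the search order these comments describe concrete, here is a minimal
userspace sketch. It is not kernel code: the idle[] array and the groups[]
topology tables are invented for illustration, pick_cpu() is a made-up
stand-in for select_idle_sibling(), and the fixed two-level topology only
approximates real sched domains.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS	4
#define NR_LVLS	2

/* Toy idle state: cpu 0 is busy, cpus 1-3 are idle. */
static bool idle[NR_CPUS] = { false, true, true, true };

/*
 * groups[level][group] is a -1 terminated cpu list. Level 0 models
 * SMT sibling pairs, level 1 a whole package; both tables are made up.
 */
static const int groups[NR_LVLS][2][NR_CPUS + 1] = {
	{ { 0, 1, -1 }, { 2, 3, -1 } },		/* level 0: SMT pairs */
	{ { 0, 1, 2, 3, -1 }, { -1 } },		/* level 1: the package */
};

static int pick_cpu(int target)
{
	/* Prefer a completely idle group, scanning higher domains first. */
	for (int lvl = NR_LVLS - 1; lvl >= 0; lvl--) {
		for (int g = 0; g < 2; g++) {
			const int *cpus = groups[lvl][g];
			bool all_idle = cpus[0] >= 0;

			for (int i = 0; cpus[i] >= 0; i++)
				if (cpus[i] == target || !idle[cpus[i]])
					all_idle = false;

			/* Whole group idle: any of its cpus will do. */
			if (all_idle)
				return cpus[0];
		}
	}

	/* No fully idle group; any idle cpu still beats the busy target. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (idle[cpu])
			return cpu;

	return target;
}

int main(void)
{
	/* cpu 0 is busy and shares its SMT pair with cpu 1, so the
	 * fully idle pair { 2, 3 } wins: prints "picked cpu 2". */
	printf("picked cpu %d\n", pick_cpu(0));
	return 0;
}

The final fallback loop mirrors the last paragraph of the new comment: when
no group is completely idle, returning any idle cpu is still preferable to
handing back the busy 'target'.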