@@ -6436,13 +6436,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	if (env->idle == CPU_IDLE) {
 		/*
-		 * This cpu is idle. If the busiest group load doesn't
-		 * have more tasks than the number of available cpu's and
-		 * there is no imbalance between this and busiest group
-		 * wrt to idle cpu's, it is balanced.
+		 * This cpu is idle. If the busiest group is not overloaded
+		 * and there is no imbalance between this and busiest group
+		 * wrt idle cpus, it is balanced. The imbalance becomes
+		 * significant if the diff is greater than 1; otherwise we
+		 * might end up just moving the imbalance to another group.
 		 */
-		if ((local->idle_cpus < busiest->idle_cpus) &&
-		    busiest->sum_nr_running <= busiest->group_weight)
+		if ((busiest->group_type != group_overloaded) &&
+		    (local->idle_cpus <= (busiest->idle_cpus + 1)))
			goto out_balanced;
 	} else {
 		/*