@@ -6679,6 +6679,9 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return true;
 
+	/* No ASYM_PACKING if target cpu is already busy */
+	if (env->idle == CPU_NOT_IDLE)
+		return true;
 	/*
 	 * ASYM_PACKING needs to move all the work to the lowest
 	 * numbered CPUs in the group, therefore mark all groups
@@ -6688,7 +6691,8 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 		if (!sds->busiest)
 			return true;
 
-		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
+		/* Prefer to move from highest possible cpu's work */
+		if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
 			return true;
 	}
 
@@ -6834,6 +6838,9 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return 0;
 
+	if (env->idle == CPU_NOT_IDLE)
+		return 0;
+
 	if (!sds->busiest)
 		return 0;
 
@@ -7026,8 +7033,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 	busiest = &sds.busiest_stat;
 
 	/* ASYM feature bypasses nice load balance check */
-	if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
-	    check_asym_packing(env, &sds))
+	if (check_asym_packing(env, &sds))
 		return sds.busiest;
 
 	/* There is no busy sibling group to pull tasks from */