@@ -4110,12 +4110,16 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
  */
 static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
-		  int this_cpu, int load_idx)
+		  int this_cpu, int sd_flag)
 {
 	struct sched_group *idlest = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
+	int load_idx = sd->forkexec_idx;
 	int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
+	if (sd_flag & SD_BALANCE_WAKE)
+		load_idx = sd->wake_idx;
+
 	do {
 		unsigned long load, avg_load;
 		int local_group;
@@ -4283,7 +4287,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 	}
 
 	while (sd) {
-		int load_idx = sd->forkexec_idx;
 		struct sched_group *group;
 		int weight;
 
@@ -4292,10 +4295,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 			continue;
 		}
 
-		if (sd_flag & SD_BALANCE_WAKE)
-			load_idx = sd->wake_idx;
-
-		group = find_idlest_group(sd, p, cpu, load_idx);
+		group = find_idlest_group(sd, p, cpu, sd_flag);
 		if (!group) {
 			sd = sd->child;
 			continue;