@@ -5692,7 +5692,7 @@ static int wake_wide(struct task_struct *p)
  * scheduling latency of the CPUs. This seems to work
  * for the overloaded case.
  */
-static bool
+static int
 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 {
 	/*
@@ -5702,15 +5702,15 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 	 * node depending on the IO topology or IRQ affinity settings.
 	 */
 	if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
-		return true;
+		return this_cpu;
 
 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
-		return true;
+		return this_cpu;
 
-	return false;
+	return nr_cpumask_bits;
 }
 
-static bool
+static int
 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 		   int this_cpu, int prev_cpu, int sync)
 {
@@ -5724,7 +5724,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 		unsigned long current_load = task_h_load(current);
 
 		if (current_load > this_eff_load)
-			return true;
+			return this_cpu;
 
 		this_eff_load -= current_load;
 	}
@@ -5741,28 +5741,28 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
 	prev_eff_load *= capacity_of(this_cpu);
 
-	return this_eff_load <= prev_eff_load;
+	return this_eff_load <= prev_eff_load ? this_cpu : nr_cpumask_bits;
 }
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
 	int this_cpu = smp_processor_id();
-	bool affine = false;
+	int target = nr_cpumask_bits;
 
 	if (sched_feat(WA_IDLE))
-		affine = wake_affine_idle(this_cpu, prev_cpu, sync);
+		target = wake_affine_idle(this_cpu, prev_cpu, sync);
 
-	if (sched_feat(WA_WEIGHT) && !affine)
-		affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
+		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
-	if (affine) {
-		schedstat_inc(sd->ttwu_move_affine);
-		schedstat_inc(p->se.statistics.nr_wakeups_affine);
-	}
+	if (target == nr_cpumask_bits)
+		return prev_cpu;
 
-	return affine;
+	schedstat_inc(sd->ttwu_move_affine);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine);
+	return target;
 }
 
 static inline unsigned long task_util(struct task_struct *p);
 
@@ -6355,8 +6355,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		if (cpu == prev_cpu)
 			goto pick_cpu;
 
-		if (wake_affine(affine_sd, p, prev_cpu, sync))
-			new_cpu = cpu;
+		new_cpu = wake_affine(affine_sd, p, prev_cpu, sync);
 	}
 
 	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
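
For illustration only, a minimal user-space sketch (not part of the patch, and not kernel code) of the calling convention the diff introduces: each wake_affine*() helper now returns either a candidate CPU id or a sentinel meaning "no recommendation" (nr_cpumask_bits in the kernel; a stand-in NO_TARGET constant below), and the caller falls back to prev_cpu when it sees the sentinel. The idle check and the CPU numbers used here are stubbed assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for nr_cpumask_bits: a value that can never be a valid CPU id. */
#define NO_TARGET 64

/* Stubbed idle check; the kernel uses idle_cpu() and cpus_share_cache(). */
static bool cpu_is_idle(int cpu)
{
	return cpu == 2;
}

/* Return a candidate CPU if an affine wake-up looks profitable, else the sentinel. */
static int wake_affine_idle_sketch(int this_cpu, int prev_cpu, int sync)
{
	(void)prev_cpu;		/* the real helper also checks cache sharing with prev_cpu */

	if (cpu_is_idle(this_cpu))
		return this_cpu;
	if (sync)
		return this_cpu;
	return NO_TARGET;
}

/* Caller: take the suggested CPU, or stay on prev_cpu when nothing was suggested. */
static int wake_affine_sketch(int this_cpu, int prev_cpu, int sync)
{
	int target = wake_affine_idle_sketch(this_cpu, prev_cpu, sync);

	return target == NO_TARGET ? prev_cpu : target;
}

int main(void)
{
	/* Waker runs on idle CPU 2; the task last ran on CPU 5 and migrates. */
	printf("%d\n", wake_affine_sketch(2, 5, 0));	/* prints 2 */
	/* Waker on busy CPU 1, no sync hint; the task stays on CPU 5. */
	printf("%d\n", wake_affine_sketch(1, 5, 0));	/* prints 5 */
	return 0;
}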