@@ -4284,6 +4284,7 @@ static int wake_wide(struct task_struct *p)
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
 	s64 this_load, load;
+	s64 this_eff_load, prev_eff_load;
 	int idx, this_cpu, prev_cpu;
 	struct task_group *tg;
 	unsigned long weight;
@@ -4327,21 +4328,21 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load > 0) {
-		s64 this_eff_load, prev_eff_load;
+	this_eff_load = 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);
 
-		this_eff_load = 100;
-		this_eff_load *= capacity_of(prev_cpu);
+	if (this_load > 0) {
 		this_eff_load *= this_load +
 			effective_load(tg, this_cpu, weight, weight);
 
-		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
-		prev_eff_load *= capacity_of(this_cpu);
 		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
+	}
+
+	balanced = this_eff_load <= prev_eff_load;
 
-		balanced = this_eff_load <= prev_eff_load;
-	} else
-		balanced = true;
-
 	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
 
 	if (!balanced)
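
For reference, a small standalone sketch (not kernel code) of the comparison that now runs unconditionally once this_eff_load and prev_eff_load are initialised outside the if-block. The capacity values and imbalance_pct below are hypothetical stand-ins for capacity_of() and sd->imbalance_pct, and the effective_load() terms are omitted; the point is only that with this_load == 0 the old code forced balanced = true, while the new code still compares the capacity-scaled baselines.

#include <stdio.h>

int main(void)
{
	long long this_load = 0;		/* waker CPU carries no extra load */
	long long load = 0;			/* load on the wakee's previous CPU */
	unsigned long this_cpu_capacity = 800;	/* stand-in for capacity_of(this_cpu) */
	unsigned long prev_cpu_capacity = 1024;	/* stand-in for capacity_of(prev_cpu) */
	unsigned int imbalance_pct = 125;	/* stand-in for sd->imbalance_pct */
	long long this_eff_load, prev_eff_load;
	int balanced;

	this_eff_load = 100;
	this_eff_load *= prev_cpu_capacity;

	prev_eff_load = 100 + (imbalance_pct - 100) / 2;
	prev_eff_load *= this_cpu_capacity;

	if (this_load > 0) {
		/* effective_load() contributions are left out of this sketch */
		this_eff_load *= this_load;
		prev_eff_load *= load;
	}

	balanced = this_eff_load <= prev_eff_load;
	printf("balanced = %d\n", balanced);	/* prints 0 with these stand-in values */
	return 0;
}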