@@ -5363,6 +5363,10 @@ static int wake_wide(struct task_struct *p)
  *
  * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or
  *			  will be) idle.
+ *
+ * wake_affine_weight() - considers the weight to reflect the average
+ *			  scheduling latency of the CPUs. This seems to work
+ *			  for the overloaded case.
  */
 
 static bool
@@ -5378,6 +5382,40 @@ wake_affine_idle(struct sched_domain *sd, struct task_struct *p,
 	return false;
 }
 
+static bool
+wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
+		   int this_cpu, int prev_cpu, int sync)
+{
+	s64 this_eff_load, prev_eff_load;
+	unsigned long task_load;
+
+	this_eff_load = target_load(this_cpu, sd->wake_idx);
+	prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+
+	if (sync) {
+		unsigned long current_load = task_h_load(current);
+
+		if (current_load > this_eff_load)
+			return true;
+
+		this_eff_load -= current_load;
+	}
+
+	task_load = task_h_load(p);
+
+	this_eff_load += task_load;
+	if (sched_feat(WA_BIAS))
+		this_eff_load *= 100;
+	this_eff_load *= capacity_of(prev_cpu);
+
+	prev_eff_load -= task_load;
+	if (sched_feat(WA_BIAS))
+		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
+	prev_eff_load *= capacity_of(this_cpu);
+
+	return this_eff_load <= prev_eff_load;
+}
+
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
@@ -5387,6 +5425,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 	if (sched_feat(WA_IDLE) && !affine)
 		affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync);
 
+	if (sched_feat(WA_WEIGHT) && !affine)
+		affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
 	if (affine) {
 		schedstat_inc(sd->ttwu_move_affine);
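
For anyone working through the arithmetic in the hunk above: wake_affine_weight() avoids dividing by CPU capacity by cross-multiplying, i.e. it tests this_eff_load / capacity(this_cpu) <= prev_eff_load / capacity(prev_cpu) as this_eff_load * capacity(prev_cpu) <= prev_eff_load * capacity(this_cpu), and WA_BIAS scales the previous CPU's side by an extra half of imbalance_pct so the task only migrates when the waking CPU is clearly better. The standalone sketch below replays that comparison with plain numbers; it is an illustration, not kernel code. prefer_waking_cpu(), struct cpu_sample and the sample loads are invented for this example, and the scheduler helpers (target_load(), source_load(), task_h_load(), capacity_of(), sched_feat()) are replaced by ordinary parameters.

/*
 * Simplified, userspace sketch of the effective-load comparison done by
 * wake_affine_weight(). Capacities use the kernel's fixed-point scale
 * (1024 == one full-capacity CPU); all other values are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cpu_sample {
	int64_t load;		/* runnable load currently on the CPU */
	unsigned int capacity;	/* compute capacity, 1024 = full      */
};

static bool prefer_waking_cpu(struct cpu_sample this_cpu,
			      struct cpu_sample prev_cpu,
			      unsigned long task_load,
			      unsigned long waker_load,
			      bool sync, bool bias,
			      unsigned int imbalance_pct)
{
	int64_t this_eff_load = this_cpu.load;
	int64_t prev_eff_load = prev_cpu.load;

	/* A sync wakeup means the waker is about to sleep: discount its load. */
	if (sync) {
		if ((int64_t)waker_load > this_eff_load)
			return true;
		this_eff_load -= (int64_t)waker_load;
	}

	/* Charge the wakee to the waking CPU, remove it from the previous one. */
	this_eff_load += (int64_t)task_load;
	prev_eff_load -= (int64_t)task_load;

	/*
	 * Compare load per capacity without dividing:
	 *   this/cap(this) <= prev/cap(prev)
	 * becomes
	 *   this * cap(prev) <= prev * cap(this)
	 * The bias inflates the previous CPU's side by half of imbalance_pct,
	 * so the task migrates only when the waking CPU is clearly better.
	 */
	if (bias) {
		this_eff_load *= 100;
		prev_eff_load *= 100 + (imbalance_pct - 100) / 2;
	}
	this_eff_load *= prev_cpu.capacity;
	prev_eff_load *= this_cpu.capacity;

	return this_eff_load <= prev_eff_load;
}

int main(void)
{
	struct cpu_sample waking = { .load = 800, .capacity = 1024 };
	struct cpu_sample prev   = { .load = 500, .capacity = 1024 };

	/* Sync wakeup: a heavy waker (load 750) about to sleep is discounted. */
	printf("sync wakeup  -> move to waking CPU? %s\n",
	       prefer_waking_cpu(waking, prev, 200, 750, true, true, 117) ?
	       "yes" : "no");

	/* Plain wakeup: no discount, the previous CPU keeps the task. */
	printf("plain wakeup -> move to waking CPU? %s\n",
	       prefer_waking_cpu(waking, prev, 200, 0, false, true, 117) ?
	       "yes" : "no");
	return 0;
}

With these example numbers the sync case prints "yes" and the plain case prints "no", which is exactly the effect of subtracting the waker's load when it is about to sleep: the waking CPU's effective load drops below the biased load of the previous CPU, so pulling the wakee is judged worthwhile.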