@@ -682,17 +682,68 @@ void init_entity_runnable_average(struct sched_entity *se)
 	sa->period_contrib = 1023;
 	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
-	sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	/*
+	 * At this point, util_avg won't be used in select_task_rq_fair anyway
+	 */
+	sa->util_avg = 0;
+	sa->util_sum = 0;
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
+/*
+ * With new tasks being created, their initial util_avgs are extrapolated
+ * based on the cfs_rq's current util_avg:
+ *
+ *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
+ *
+ * However, in many cases, the above util_avg does not give a desired
+ * value. Moreover, the sum of the util_avgs may be divergent, such
+ * as when the series is a harmonic series.
+ *
+ * To solve this problem, we also cap the util_avg of successive tasks to
+ * only 1/2 of the remaining utilization budget:
+ *
+ *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
+ *
+ * where n denotes the nth task.
+ *
+ * For example, the simplest series from the beginning would be:
+ *
+ *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
+ * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
+ *
+ * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
+ * if util_avg > util_avg_cap.
+ */
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct sched_avg *sa = &se->avg;
+	long cap = (long)(scale_load_down(SCHED_LOAD_SCALE) - cfs_rq->avg.util_avg) / 2;
+
+	if (cap > 0) {
+		if (cfs_rq->avg.util_avg != 0) {
+			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
+			sa->util_avg /= (cfs_rq->avg.load_avg + 1);
+
+			if (sa->util_avg > cap)
+				sa->util_avg = cap;
+		} else {
+			sa->util_avg = cap;
+		}
+		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+	}
+}
+
 static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
 static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
+void post_init_entity_util_avg(struct sched_entity *se)
+{
+}
 #endif
 
 /*
@@ -8384,6 +8435,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
+		post_init_entity_util_avg(se);
 	}
 
 	return 1;
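
For reference, here is a minimal userspace sketch of the same arithmetic, illustrative only and not kernel code: it assumes scale_load_down(SCHED_LOAD_SCALE) == 1024 and a nice-0 se->load.weight of 1024, ignores PELT decay between forks, and the new_task_util() helper is a hypothetical stand-in for post_init_entity_util_avg(). Run against an initially idle cfs_rq, it reproduces the series quoted in the comment above (task util_avg 512, 256, 128, ... and cfs_rq util_avg 512, 768, 896, ...).

/*
 * Illustrative replay of the post_init_entity_util_avg() arithmetic.
 * Assumptions: scale_load_down(SCHED_LOAD_SCALE) == 1024, nice-0 weight
 * of 1024, and no PELT decay between successive forks.
 */
#include <stdio.h>

#define SCALE		1024	/* scale_load_down(SCHED_LOAD_SCALE) */
#define NICE_0_WEIGHT	1024	/* default se->load.weight of a nice-0 task */

static long new_task_util(long cfs_util_avg, long cfs_load_avg, long se_weight)
{
	long cap = (SCALE - cfs_util_avg) / 2;	/* half the remaining budget */
	long util;

	if (cap <= 0)
		return 0;		/* patch leaves util_avg at its initial 0 */

	if (cfs_util_avg == 0)
		return cap;		/* idle cfs_rq: take the cap directly */

	/* extrapolate from the cfs_rq, then clamp to the cap */
	util = cfs_util_avg * se_weight / (cfs_load_avg + 1);
	return util > cap ? cap : util;
}

int main(void)
{
	long cfs_util = 0, cfs_load = 0;
	int i;

	/*
	 * Fork nice-0 tasks into an initially idle cfs_rq, crediting each new
	 * task's util_avg and load weight back to the cfs_rq (decay ignored).
	 */
	for (i = 1; i <= 7; i++) {
		long util = new_task_util(cfs_util, cfs_load, NICE_0_WEIGHT);

		cfs_util += util;
		cfs_load += NICE_0_WEIGHT;
		printf("task %d: util_avg=%ld cfs_rq util_avg=%ld\n", i, util, cfs_util);
	}
	return 0;
}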