@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -4946,19 +4944,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)