@@ -3219,6 +3219,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 		sub_positive(&sa->load_avg, r);
 		sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
 		removed_load = 1;
+		set_tg_cfs_propagate(cfs_rq);
 	}
 
 	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -3226,6 +3227,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 		sub_positive(&sa->util_avg, r);
 		sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
 		removed_util = 1;
+		set_tg_cfs_propagate(cfs_rq);
 	}
 
 	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -6872,6 +6874,10 @@ static void update_blocked_averages(int cpu)
 
 		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
+
+		/* Propagate pending load changes to the parent */
+		if (cfs_rq->tg->se[cpu])
+			update_load_avg(cfs_rq->tg->se[cpu], 0);
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
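
For context on what the added lines do: set_tg_cfs_propagate() (introduced earlier in this series) only marks the child cfs_rq as having a pending delta, and the new update_load_avg() call on cfs_rq->tg->se[cpu] is what actually pulls that delta into the parent. The stand-alone program below is a rough model of that mark-then-consume handshake, not kernel code: the struct and helper names are invented for illustration, and the PELT averaging is reduced to a plain subtraction.

#include <stdio.h>
#include <stdbool.h>

/* toy stand-ins for cfs_rq load tracking; not the kernel's types */
struct cfs_rq_model {
	long load_avg;
	bool propagate_avg;	/* "something changed below, push it up" */
};

/* models set_tg_cfs_propagate(): just set the mark */
static void set_propagate(struct cfs_rq_model *cfs_rq)
{
	cfs_rq->propagate_avg = true;
}

/* models the test-and-clear done when the parent entity is updated */
static bool test_and_clear_propagate(struct cfs_rq_model *cfs_rq)
{
	if (!cfs_rq->propagate_avg)
		return false;
	cfs_rq->propagate_avg = false;
	return true;
}

int main(void)
{
	struct cfs_rq_model child  = { .load_avg = 1024, .propagate_avg = false };
	struct cfs_rq_model parent = { .load_avg = 1024, .propagate_avg = false };
	long removed = 512;

	/* removed_load path above: subtract locally and mark, nothing else */
	child.load_avg -= removed;
	set_propagate(&child);

	/* update_load_avg(tg->se[cpu], 0) path: consume the mark upward */
	if (test_and_clear_propagate(&child)) {
		parent.load_avg -= removed;
		printf("parent load_avg propagated to %ld\n", parent.load_avg);
	}
	return 0;
}

In the real patch the consume side lives in the PELT update of the parent sched_entity, which is why the update_blocked_averages() pass in the last hunk is enough to drain pending marks even when no task wakes on that CPU.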