@@ -4202,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
+	/* Synchronize hierarchical throttle counter: */
+	if (unlikely(!cfs_rq->throttle_uptodate)) {
+		struct rq *rq = rq_of(cfs_rq);
+		struct cfs_rq *pcfs_rq;
+		struct task_group *tg;
+
+		cfs_rq->throttle_uptodate = 1;
+
+		/* Get closest up-to-date node, because leaves go first: */
+		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+			if (pcfs_rq->throttle_uptodate)
+				break;
+		}
+		if (tg) {
+			cfs_rq->throttle_count = pcfs_rq->throttle_count;
+			cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		}
+	}
+
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
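
For context, the lazy-synchronization pattern this hunk implements can be sketched outside the kernel: a node whose hierarchical counter is stale first marks itself up to date (so later calls return immediately), then walks its parent chain to the nearest ancestor whose counter is already synchronized and copies that value. Below is a minimal standalone C sketch of that pattern under assumed names; the "node" type, "sync_count" function, and field names are hypothetical illustrations, not kernel code.

	#include <stdio.h>

	struct node {
		struct node *parent;
		int uptodate;	/* has count been synchronized yet? */
		int count;	/* hierarchical counter (cf. throttle_count) */
	};

	/* Lazily bring n's counter in sync with its hierarchy. */
	static void sync_count(struct node *n)
	{
		struct node *p;

		if (n->uptodate)
			return;

		/* Mark first, so repeated calls are a cheap no-op. */
		n->uptodate = 1;

		/*
		 * Find the closest up-to-date ancestor; an already
		 * synchronized ancestor is the authoritative source.
		 */
		for (p = n->parent; p; p = p->parent)
			if (p->uptodate)
				break;

		if (p)
			n->count = p->count;
	}

	int main(void)
	{
		struct node root   = { .parent = NULL,    .uptodate = 1, .count = 2 };
		struct node middle = { .parent = &root,   .uptodate = 0, .count = 0 };
		struct node leaf   = { .parent = &middle, .uptodate = 0, .count = 0 };

		sync_count(&leaf);	/* skips stale "middle", copies from root */
		printf("leaf.count = %d\n", leaf.count);	/* prints 2 */
		return 0;
	}

As in the patch, the walk skips over ancestors that are themselves stale ("middle" above) and stops at the first synchronized one, which is what makes the initialization safe even when leaves are visited before their parents.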