@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	/* Synchronize hierarchical throttle counter: */
-	if (unlikely(!cfs_rq->throttle_uptodate)) {
-		struct rq *rq = rq_of(cfs_rq);
-		struct cfs_rq *pcfs_rq;
-		struct task_group *tg;
-
-		cfs_rq->throttle_uptodate = 1;
-
-		/* Get closest up-to-date node, because leaves go first: */
-		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-			if (pcfs_rq->throttle_uptodate)
-				break;
-		}
-		if (tg) {
-			cfs_rq->throttle_count = pcfs_rq->throttle_count;
-			cfs_rq->throttled_clock_task = rq_clock_task(rq);
-		}
-	}
-
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *pcfs_rq, *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (!tg->parent)
+		return;
+
+	cfs_rq = tg->cfs_rq[cpu];
+	pcfs_rq = tg->parent->cfs_rq[cpu];
+
+	cfs_rq->throttle_count = pcfs_rq->throttle_count;
+	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)
 
 		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		sync_throttle(tg, i);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }