@@ -3688,7 +3688,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task;
+		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
 
 	return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
@@ -3826,13 +3826,11 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
 	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
 	cfs_rq->throttle_count--;
-#ifdef CONFIG_SMP
 	if (!cfs_rq->throttle_count) {
 		/* adjust cfs_rq_clock_task() */
 		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
 					     cfs_rq->throttled_clock_task;
 	}
-#endif
 
 	return 0;
 }