@@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 	if (!sched_feat(UTIL_EST))
 		return;
 
-	/*
-	 * Update root cfs_rq's estimated utilization
-	 *
-	 * If *p is the last task then the root cfs_rq's estimated utilization
-	 * of a CPU is 0 by definition.
-	 */
-	ue.enqueued = 0;
-	if (cfs_rq->nr_running) {
-		ue.enqueued = cfs_rq->avg.util_est.enqueued;
-		ue.enqueued -= min_t(unsigned int, ue.enqueued,
-				     (_task_util_est(p) | UTIL_AVG_UNCHANGED));
-	}
+	/* Update root cfs_rq's estimated utilization */
+	ue.enqueued = cfs_rq->avg.util_est.enqueued;
+	ue.enqueued -= min_t(unsigned int, ue.enqueued,
+			     (_task_util_est(p) | UTIL_AVG_UNCHANGED));
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
 	/*
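The dropped special case is safe to remove because the min_t() clamp already guarantees ue.enqueued cannot underflow: when *p is the last runnable task, the root cfs_rq's accumulated value matches p's own flagged contribution, so the subtraction lands on 0 without an explicit nr_running guard. Below is a minimal standalone sketch of that saturating-subtraction pattern; it is not kernel code, and it assumes only that UTIL_AVG_UNCHANGED is a flag bit (0x1 in the kernel) OR'ed into each task's contribution symmetrically at enqueue and dequeue time:

	/*
	 * Standalone sketch (userspace, not kernel code) of the clamped
	 * subtraction in the hunk above. Because the subtrahend is capped
	 * at the current accumulated value, the counter saturates at 0
	 * when the last contribution is removed.
	 */
	#include <stdio.h>

	#define UTIL_AVG_UNCHANGED 0x1	/* assumption: LSB flag, as in kernel/sched/pelt.h */

	static unsigned int dequeue_contrib(unsigned int enqueued, unsigned int task_est)
	{
		unsigned int sub = task_est | UTIL_AVG_UNCHANGED;

		/* open-coded min_t(unsigned int, enqueued, sub): never underflow */
		if (sub > enqueued)
			sub = enqueued;
		return enqueued - sub;
	}

	int main(void)
	{
		unsigned int enqueued = 0;

		/* enqueue two tasks' estimated utilization, flag OR'ed in */
		enqueued += 100 | UTIL_AVG_UNCHANGED;
		enqueued += 200 | UTIL_AVG_UNCHANGED;

		enqueued = dequeue_contrib(enqueued, 200);
		enqueued = dequeue_contrib(enqueued, 100);	/* last task */
		printf("%u\n", enqueued);			/* prints 0 */
		return 0;
	}

Compiled and run, this prints 0: it is the clamp, not a last-task special case, that takes the counter back to zero when the final contribution is removed.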