@@ -7424,6 +7424,19 @@ static void attach_tasks(struct lb_env *env)
 	rq_unlock(env->dst_rq, &rf);
 }
 
+static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->avg.load_avg)
+		return true;
+
+	if (cfs_rq->avg.util_avg)
+		return true;
+
+	return false;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
 	if (cfs_rq->load.weight)
@@ -7441,8 +7454,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 	return true;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7478,7 +7489,9 @@ static void update_blocked_averages(int cpu)
 		 */
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
-		else
+
+		/* Don't need periodic decay once load/util_avg are null */
+		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
 
@@ -7548,7 +7561,7 @@ static inline void update_blocked_averages(int cpu)
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (cfs_rq_is_decayed(cfs_rq))
+	if (!cfs_rq_has_blocked(cfs_rq))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);