@@ -7290,6 +7290,14 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 	return false;
 }
 
+static inline bool rt_rq_has_blocked(struct rq *rq)
+{
+	if (READ_ONCE(rq->avg_rt.util_avg))
+		return true;
+
+	return false;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7349,6 +7357,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+	/* Don't need periodic decay once load/util_avg are null */
+	if (rt_rq_has_blocked(rq))
+		done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
@@ -7414,9 +7426,10 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (!cfs_rq_has_blocked(cfs_rq))
+	if (!cfs_rq_has_blocked(cfs_rq) && !rt_rq_has_blocked(rq))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);