@@ -7263,6 +7263,7 @@ static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq, *pos;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 	bool done = true;
 
@@ -7299,8 +7300,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 	/* Don't need periodic decay once load/util_avg are null */
 	if (others_have_blocked(rq))
@@ -7365,13 +7368,16 @@ static inline void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct cfs_rq *cfs_rq = &rq->cfs;
+	const struct sched_class *curr_class;
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-	update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+	curr_class = rq->curr->sched_class;
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+	update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
 	update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
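
The third argument to update_rt_rq_load_avg() and update_dl_rq_load_avg() is the PELT "running" flag. Passing a hard-coded 0 told the PELT machinery that no rt/dl task was executing, so those utilization signals decayed even while rq->curr was an RT or deadline task; checking rq->curr->sched_class fixes that. The userspace sketch below models the effect. It is not the kernel's PELT implementation: pelt_step(), the decay constants, and the demo values are illustrative assumptions, chosen only to show how the running flag keeps the signal from decaying to zero.

#include <stdio.h>

/* Illustrative constants: roughly 0.978 decay per 1024us PELT period,
 * with utilization on the kernel's 0..1024 capacity scale. */
#define DECAY_NUM	1002
#define DECAY_DEN	1024
#define CAP_SCALE	1024UL

/* Advance a PELT-like utilization signal by 'periods' periods.
 * When 'running' is 0 the signal decays geometrically toward 0;
 * when 'running' is 1 it converges toward full capacity instead. */
static unsigned long pelt_step(unsigned long util, int periods, int running)
{
	for (int i = 0; i < periods; i++)
		util = (util * DECAY_NUM +
			(running ? CAP_SCALE * (DECAY_DEN - DECAY_NUM) : 0)) / DECAY_DEN;
	return util;
}

int main(void)
{
	unsigned long util = 800;	/* pretend rt_rq utilization */

	/* Old behaviour: always running=0, so utilization decays away
	 * even though an RT task occupies the CPU. */
	printf("running=0 after 100 periods: %lu\n", pelt_step(util, 100, 0));

	/* Patched behaviour: running=1 while rq->curr is an RT task,
	 * so the signal is sustained. */
	printf("running=1 after 100 periods: %lu\n", pelt_step(util, 100, 1));
	return 0;
}

With running=0 the modeled signal decays to near zero after 100 periods; with running=1 it holds close to full capacity, which is the behaviour the patch restores for CPUs busy running rt/dl tasks.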