@@ -7898,7 +7898,7 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
-static bool update_nohz_stats(struct rq *rq)
+static bool update_nohz_stats(struct rq *rq, bool force)
 {
 #ifdef CONFIG_NO_HZ_COMMON
 	unsigned int cpu = rq->cpu;
@@ -7909,7 +7909,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
 		return true;
 
 	update_blocked_averages(cpu);
@@ -7942,7 +7942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq))
+		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
			env->flags |= LBF_NOHZ_AGAIN;
 
 		/* Bias balancing toward CPUs of our domain: */
@@ -9552,8 +9552,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 		rq = cpu_rq(balance_cpu);
 
-		update_blocked_averages(rq->cpu);
-		has_blocked_load |= rq->has_blocked_load;
+		has_blocked_load |= update_nohz_stats(rq, true);
 
 		/*
 		 * If time for next balance is due,
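
For readability, here is a sketch of how update_nohz_stats() reads once the
hunks above are applied. The lines elided between the first two hunks (the
rq->has_blocked_load early-out and the final return) are not part of this
diff; they are assumed from the surrounding context:

/*
 * Sketch only, not part of the patch. The has_blocked_load early-out
 * and the final return fall between the two hunks above and are
 * assumed unchanged.
 */
static bool update_nohz_stats(struct rq *rq, bool force)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned int cpu = rq->cpu;

	/* Assumed context: nothing to do if no blocked load remains. */
	if (!rq->has_blocked_load)
		return false;

	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return false;

	/*
	 * force skips the jiffies-based throttle: callers that need
	 * fresh blocked load (the nohz_idle_balance() hunk passes true)
	 * always reach update_blocked_averages(), while the stats path
	 * in update_sg_lb_stats() passes false and stays rate-limited
	 * to one update per tick.
	 */
	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
		return true;

	update_blocked_averages(cpu);

	/* Assumed context: report whether blocked load is still pending. */
	return rq->has_blocked_load;
#else
	return false;
#endif
}

With this, the nohz_idle_balance() hunk no longer open-codes
update_blocked_averages() plus a manual read of rq->has_blocked_load: the
single forced call folds both steps together and, if the assumed early-out
above is indeed present, also skips runqueues whose blocked load has
already fully decayed.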