@@ -5866,7 +5866,8 @@ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *gro
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, struct sg_lb_stats *sgs)
+			int local_group, struct sg_lb_stats *sgs,
+			bool *overload)
 {
 	unsigned long load;
 	int i;
@@ -5884,6 +5885,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 		sgs->group_load += load;
 		sgs->sum_nr_running += rq->nr_running;
+
+		if (rq->nr_running > 1)
+			*overload = true;
+
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -5994,6 +5999,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats tmp_sgs;
 	int load_idx, prefer_sibling = 0;
+	bool overload = false;
 
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
@@ -6014,7 +6020,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 			update_group_capacity(env->sd, env->dst_cpu);
 		}
 
-		update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
+						&overload);
 
 		if (local_group)
 			goto next_group;
@@ -6048,6 +6055,13 @@ next_group:
 
 	if (env->sd->flags & SD_NUMA)
 		env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
+	if (!env->sd->parent) {
+		/* update overload indicator if we are at root domain */
+		if (env->dst_rq->rd->overload != overload)
+			env->dst_rq->rd->overload = overload;
+	}
+
 }
 
 /**
@@ -6766,7 +6780,8 @@ static int idle_balance(struct rq *this_rq)
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
-	if (this_rq->avg_idle < sysctl_sched_migration_cost) {
+	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	    !this_rq->rd->overload) {
 		rcu_read_lock();
 		sd = rcu_dereference_check_sched_domain(this_rq->sd);
 		if (sd)
|