@@ -5500,7 +5500,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
 			int local_group, struct sg_lb_stats *sgs)
 {
-	unsigned long nr_running;
 	unsigned long load;
 	int i;
 
@@ -5509,8 +5508,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		nr_running = rq->nr_running;
-
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -5518,7 +5515,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			load = source_load(i, load_idx);
 
 		sgs->group_load += load;
-		sgs->sum_nr_running += nr_running;
+		sgs->sum_nr_running += rq->nr_running;
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
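
For illustration only, not part of the patch: a minimal standalone C sketch of
the net effect of this change, using simplified stand-in definitions for
struct rq and struct sg_lb_stats (the real kernel types, for_each_cpu_and(),
cpu_rq(), target_load() and source_load() are omitted). The point is that the
temporary nr_running variable is gone and sum_nr_running is accumulated
directly from rq->nr_running inside the loop.

/* Standalone sketch; not kernel code. */
#include <stdio.h>

struct rq {
	unsigned int nr_running;	/* stand-in for the kernel's rq field */
	unsigned long load;		/* simplified per-cpu load value */
};

struct sg_lb_stats {
	unsigned long group_load;	/* total load of the sched group */
	unsigned int sum_nr_running;	/* total runnable tasks in the group */
};

static void update_sg_lb_stats_sketch(struct sg_lb_stats *sgs,
				      struct rq *rqs, int nr_cpus)
{
	for (int i = 0; i < nr_cpus; i++) {
		struct rq *rq = &rqs[i];

		sgs->group_load += rq->load;
		/* read rq->nr_running directly, as the patch now does */
		sgs->sum_nr_running += rq->nr_running;
	}
}

int main(void)
{
	struct rq rqs[2] = { { .nr_running = 2, .load = 1024 },
			     { .nr_running = 1, .load = 512 } };
	struct sg_lb_stats sgs = { 0 };

	update_sg_lb_stats_sketch(&sgs, rqs, 2);
	printf("group_load=%lu sum_nr_running=%u\n",
	       sgs.group_load, sgs.sum_nr_running);
	return 0;
}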