@@ -5739,7 +5739,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 	const struct cpumask *span = sched_domain_span(sd);
 	struct cpumask *covered = sched_domains_tmpmask;
 	struct sd_data *sdd = sd->private;
-	struct sched_domain *child;
+	struct sched_domain *sibling;
 	int i;
 
 	cpumask_clear(covered);
@@ -5750,10 +5750,10 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		if (cpumask_test_cpu(i, covered))
 			continue;
 
-		child = *per_cpu_ptr(sdd->sd, i);
+		sibling = *per_cpu_ptr(sdd->sd, i);
 
 		/* See the comment near build_group_mask(). */
-		if (!cpumask_test_cpu(i, sched_domain_span(child)))
+		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
 			continue;
 
 		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
@@ -5763,10 +5763,9 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
			goto fail;
 
 		sg_span = sched_group_cpus(sg);
-		if (child->child) {
-			child = child->child;
-			cpumask_copy(sg_span, sched_domain_span(child));
-		} else
+		if (sibling->child)
+			cpumask_copy(sg_span, sched_domain_span(sibling->child));
+		else
 			cpumask_set_cpu(i, sg_span);
 
 		cpumask_or(covered, covered, sg_span);
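
Note that the third hunk is more than a mechanical rename: it also drops the
reassignment of the loop variable before the cpumask_copy(). A minimal
user-space sketch (with a hypothetical stub struct standing in for the
kernel's struct sched_domain, and a plain unsigned long standing in for a
cpumask) showing that the old and new forms compute the same span:

	#include <stdio.h>

	/* Stub type; illustration only, not the kernel definition. */
	struct sched_domain {
		struct sched_domain *child;
		unsigned long span;	/* stand-in for sched_domain_span() */
	};

	int main(void)
	{
		struct sched_domain grandchild = { .child = NULL, .span = 0x0f };
		struct sched_domain level = { .child = &grandchild, .span = 0xff };
		unsigned long old_span = 0, new_span = 0;

		/* Old form: reassign, then copy from the reassigned pointer. */
		struct sched_domain *child = &level;
		if (child->child) {
			child = child->child;
			old_span = child->span;		/* cpumask_copy() */
		}

		/* New form: dereference ->child directly, no reassignment. */
		struct sched_domain *sibling = &level;
		if (sibling->child)
			new_span = sibling->child->span; /* cpumask_copy() */

		printf("old=%#lx new=%#lx equal=%d\n",
		       old_span, new_span, old_span == new_span);
		return 0;
	}

Both forms read the span of the same node, so the rewrite is behavior
preserving; the else path (cpumask_set_cpu()) is untouched by the patch.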