@@ -6903,6 +6903,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	enum s_alloc alloc_state;
 	struct sched_domain *sd;
 	struct s_data d;
+	struct rq *rq = NULL;
 	int i, ret = -ENOMEM;
 
 	alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
@@ -6953,11 +6954,22 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
+		rq = cpu_rq(i);
 		sd = *per_cpu_ptr(d.sd, i);
+
+		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
+		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
+			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
+
 		cpu_attach_domain(sd, d.rd, i);
 	}
 	rcu_read_unlock();
 
+	if (rq) {
+		pr_info("span: %*pbl (max cpu_capacity = %lu)\n",
+			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
+	}
+
 	ret = 0;
 error:
 	__free_domain_allocs(&d, alloc_state, cpu_map);
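
The tearing-avoidance pattern in the hunk above is worth a standalone illustration. What follows is a minimal userspace sketch of the same lockless max-tracking idea, using C11 relaxed atomics as a stand-in for the kernel's READ_ONCE()/WRITE_ONCE(); the function name and capacity values are illustrative, not kernel API. The compare and the store remain two separate operations, which is safe here for the same reason it is in the patch: only one writer runs at a time (domain rebuilds are serialized), so the single-marked accesses only need to keep concurrent readers from observing a torn value.

/*
 * Userspace sketch of the lockless max-tracking pattern; C11 relaxed
 * atomics approximate the kernel's READ_ONCE()/WRITE_ONCE().
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long max_cpu_capacity;

/* Called once per CPU by a single writer; only ever raises the maximum. */
static void update_max_capacity(unsigned long cap)
{
	/* Marked load and store: readers never see a half-written value. */
	if (cap > atomic_load_explicit(&max_cpu_capacity,
				       memory_order_relaxed))
		atomic_store_explicit(&max_cpu_capacity, cap,
				      memory_order_relaxed);
}

int main(void)
{
	unsigned long caps[] = { 446, 1024, 871 };	/* made-up capacities */

	for (unsigned int i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		update_max_capacity(caps[i]);

	printf("max cpu_capacity = %lu\n",
	       atomic_load_explicit(&max_cpu_capacity,
				    memory_order_relaxed));
	return 0;	/* prints: max cpu_capacity = 1024 */
}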