@@ -5981,14 +5981,14 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_busy);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
 
 static void update_top_cache_domain(int cpu)
 {
+	struct sched_domain_shared *sds = NULL;
 	struct sched_domain *sd;
-	struct sched_domain *busy_sd = NULL;
 	int id = cpu;
 	int size = 1;
 
@@ -5996,13 +5996,13 @@ static void update_top_cache_domain(int cpu)
 	if (sd) {
 		id = cpumask_first(sched_domain_span(sd));
 		size = cpumask_weight(sched_domain_span(sd));
-		busy_sd = sd->parent; /* sd_busy */
+		sds = sd->shared;
 	}
-	rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_size, cpu) = size;
 	per_cpu(sd_llc_id, cpu) = id;
+	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
@@ -6299,7 +6299,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 		return;
 
 	update_group_capacity(sd, cpu);
-	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
 /*
@@ -6546,6 +6545,7 @@ sd_init(struct sched_domain_topology_level *tl,
 	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
 		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
 		atomic_inc(&sd->shared->ref);
+		atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
 	}
 
 	sd->private = sdd;