@@ -5947,6 +5947,8 @@ static void destroy_sched_domain(struct sched_domain *sd)
 		kfree(sd->groups->sgc);
 		kfree(sd->groups);
 	}
+	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
+		kfree(sd->shared);
 	kfree(sd);
 }
 
@@ -6385,6 +6387,9 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
+	if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
+		*per_cpu_ptr(sdd->sds, cpu) = NULL;
+
 	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
 
@@ -6429,10 +6434,12 @@ static int sched_domains_curr_level;
 
 static struct sched_domain *
 sd_init(struct sched_domain_topology_level *tl,
+	const struct cpumask *cpu_map,
 	struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
-	int sd_weight, sd_flags = 0;
+	struct sd_data *sdd = &tl->data;
+	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
+	int sd_id, sd_weight, sd_flags = 0;
 
 #ifdef CONFIG_NUMA
 	/*
@@ -6487,6 +6494,9 @@ sd_init(struct sched_domain_topology_level *tl,
 #endif
 	};
 
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd_id = cpumask_first(sched_domain_span(sd));
+
 	/*
 	 * Convert topological properties into behaviour.
 	 */
@@ -6529,7 +6539,16 @@ sd_init(struct sched_domain_topology_level *tl,
 		sd->idle_idx = 1;
 	}
 
-	sd->private = &tl->data;
+	/*
+	 * For all levels sharing cache; connect a sched_domain_shared
+	 * instance.
+	 */
+	if (sd->flags & SD_SHARE_PKG_RESOURCES) {
+		sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
+		atomic_inc(&sd->shared->ref);
+	}
+
+	sd->private = sdd;
 
 	return sd;
 }
@@ -6839,6 +6858,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sd)
 			return -ENOMEM;
 
+		sdd->sds = alloc_percpu(struct sched_domain_shared *);
+		if (!sdd->sds)
+			return -ENOMEM;
+
 		sdd->sg = alloc_percpu(struct sched_group *);
 		if (!sdd->sg)
 			return -ENOMEM;
@@ -6849,6 +6872,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
+			struct sched_domain_shared *sds;
 			struct sched_group *sg;
 			struct sched_group_capacity *sgc;
 
@@ -6859,6 +6883,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 
 			*per_cpu_ptr(sdd->sd, j) = sd;
 
+			sds = kzalloc_node(sizeof(struct sched_domain_shared),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sds)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sds, j) = sds;
+
 			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
 			if (!sg)
@@ -6898,6 +6929,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 				kfree(*per_cpu_ptr(sdd->sd, j));
 			}
 
+			if (sdd->sds)
+				kfree(*per_cpu_ptr(sdd->sds, j));
 			if (sdd->sg)
 				kfree(*per_cpu_ptr(sdd->sg, j));
 			if (sdd->sgc)
@@ -6905,6 +6938,8 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		}
 		free_percpu(sdd->sd);
 		sdd->sd = NULL;
+		free_percpu(sdd->sds);
+		sdd->sds = NULL;
 		free_percpu(sdd->sg);
 		sdd->sg = NULL;
 		free_percpu(sdd->sgc);
@@ -6916,9 +6951,8 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
 		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 		struct sched_domain *child, int cpu)
 {
-	struct sched_domain *sd = sd_init(tl, child, cpu);
+	struct sched_domain *sd = sd_init(tl, cpu_map, child, cpu);
 
-	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
 	if (child) {
 		sd->level = child->level + 1;
 		sched_domain_level_max = max(sched_domain_level_max, sd->level);
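
The hunks above rely on declarations made in the header part of this patch, which is not included in this section. The following is only a sketch of those declarations, inferred from how the code above uses them (the actual header change also carries __percpu annotations and may order or name fields differently):

/*
 * Sketch, not a verbatim copy of the header hunk: one sched_domain_shared
 * object per cache-sharing domain span, reference counted by the
 * sched_domains attached to it.
 */
struct sched_domain_shared {
	atomic_t	ref;	/* sched_domains currently pointing at this instance */
};

struct sd_data {
	struct sched_domain		**sd;	/* pre-existing */
	struct sched_domain_shared	**sds;	/* new: per-CPU shared-state pointers */
	struct sched_group		**sg;	/* pre-existing */
	struct sched_group_capacity	**sgc;	/* pre-existing */
};

/* struct sched_domain gains a back-pointer to its shared state: */
struct sched_domain {
	/* ... existing fields ... */
	struct sched_domain_shared *shared;
};

The lifecycle mirrors the existing sched_group/sched_group_capacity handling: __sdt_alloc() allocates one sched_domain_shared per CPU, sd_init() attaches the instance belonging to the first CPU of a cache-sharing domain's span and takes a reference, claim_allocations() clears the per-CPU slot of any instance that got referenced so __sdt_free() only frees the unused ones, and destroy_sched_domain() drops the reference and kfree()s the object on the final put.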