@@ -1274,6 +1274,7 @@ static bool shared_caches;
 void start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
+	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
 
 	mmgrab(&init_mm);
 	current->active_mm = &init_mm;
@@ -1299,11 +1300,13 @@ void start_secondary(void *unused)
 	/* Update topology CPU masks */
 	add_cpu_to_masks(cpu);
 
+	if (has_big_cores)
+		sibling_mask = cpu_smallcore_mask;
 	/*
 	 * Check for any shared caches. Note that this must be done on a
 	 * per-core basis because one core in the pair might be disabled.
 	 */
-	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
+	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
 		shared_caches = true;
 
 	set_numa_node(numa_cpu_lookup_table[cpu]);
@@ -1370,6 +1373,13 @@ static const struct cpumask *shared_cache_mask(int cpu)
 	return cpu_l2_cache_mask(cpu);
 }
 
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *smallcore_smt_mask(int cpu)
+{
+	return cpu_smallcore_mask(cpu);
+}
+#endif
+
 static struct sched_domain_topology_level power9_topology[] = {
 #ifdef CONFIG_SCHED_SMT
 	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
@@ -1397,6 +1407,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		shared_proc_topology_init();
 	dump_numa_cpu_topology();
 
+#ifdef CONFIG_SCHED_SMT
+	if (has_big_cores) {
+		pr_info("Using small cores at SMT level\n");
+		power9_topology[0].mask = smallcore_smt_mask;
+		powerpc_topology[0].mask = smallcore_smt_mask;
+	}
+#endif
 	/*
 	 * If any CPU detects that it's sharing a cache with another CPU then
 	 * use the deeper topology that is aware of this sharing.
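
As a rough illustration for readers outside the kernel tree, the standalone sketch below mimics the pattern the hunks above rely on: a function pointer selects between two sibling-mask helpers, and the SMT-level entry of a topology table is rewritten once big cores are detected. Every identifier in it (fake_cpumask_t, big_core_mask, small_core_mask, topo_level) is a made-up stand-in rather than a kernel symbol, and the mask arithmetic only approximates the SMT8 big core / SMT4 small core split this patch targets.

/* Standalone userspace sketch; all names are hypothetical, not kernel APIs. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long fake_cpumask_t;

/* Big-core view: the 8 SMT threads of the core containing @cpu. */
static const fake_cpumask_t *big_core_mask(int cpu)
{
	static fake_cpumask_t mask;

	mask = 0xffUL << ((cpu / 8) * 8);
	return &mask;
}

/* Small-core view: only the 4 threads sharing @cpu's small core. */
static const fake_cpumask_t *small_core_mask(int cpu)
{
	static fake_cpumask_t mask;

	mask = 0x0fUL << ((cpu / 4) * 4);
	return &mask;
}

/* Crude analogue of one sched_domain_topology_level entry. */
struct topo_level {
	const fake_cpumask_t *(*mask)(int cpu);
	const char *name;
};

static struct topo_level topology[] = {
	{ big_core_mask, "SMT" },
	{ NULL,          "DIE" },
};

int main(void)
{
	bool has_big_cores = true;	/* stand-in for the detected platform property */

	/* Mirrors the smp_cpus_done() hunk: narrow the SMT level to the small core. */
	if (has_big_cores)
		topology[0].mask = small_core_mask;

	printf("SMT-level siblings of CPU 5: 0x%lx (%s level)\n",
	       *topology[0].mask(5), topology[0].name);
	return 0;
}

With has_big_cores set, the program reports 0xf0 for CPU 5 (the four threads of its small core) instead of 0xff (the full big core), which is the same narrowing the patch applies to the scheduler's SMT level.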