@@ -185,6 +185,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
 	return &cpu_topology[cpu].core_sibling;
 }
 
+/*
+ * The current assumption is that we can power gate each core independently.
+ * This will be superseded by DT binding once available.
+ */
+const struct cpumask *cpu_corepower_mask(int cpu)
+{
+	return &cpu_topology[cpu].thread_sibling;
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -266,6 +275,20 @@ void store_cpu_topology(unsigned int cpuid)
 		cpu_topology[cpuid].socket_id, mpidr);
 }
 
+static inline const int cpu_corepower_flags(void)
+{
+	return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
+}
+
+static struct sched_domain_topology_level arm_topology[] = {
+#ifdef CONFIG_SCHED_MC
+	{ cpu_corepower_mask, cpu_corepower_flags, SD_INIT_NAME(GMC) },
+	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+#endif
+	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
+	{ NULL, },
+};
+
 /*
  * init_cpu_topology is called at boot when only one cpu is running
  * which prevent simultaneous write access to cpu_topology array
@@ -289,4 +312,7 @@ void __init init_cpu_topology(void)
 	smp_wmb();
 
 	parse_dt_topology();
+
+	/* Set scheduler topology descriptor */
+	set_sched_topology(arm_topology);
 }
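
For reference, a sketch of the generic descriptor that arm_topology instantiates
and that set_sched_topology() consumes, following the definition the scheduler
core exposes in include/linux/sched.h for this series (layout shown here is a
simplified sketch, not a verbatim copy):

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef const int (*sched_domain_flags_f)(void);

struct sched_domain_topology_level {
	sched_domain_mask_f mask;	/* cpus spanned at this level, e.g. cpu_corepower_mask */
	sched_domain_flags_f sd_flags;	/* level flags, e.g. cpu_corepower_flags above */
	int flags;
	int numa_level;
	struct sd_data data;
#ifdef CONFIG_SCHED_DEBUG
	char *name;			/* SD_INIT_NAME(GMC) expands to .name = "GMC" */
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

Levels must nest from smallest to largest span, which is why the power-gating
GMC level (thread_sibling) is listed before MC (core_sibling), with DIE last.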