@@ -5777,6 +5777,18 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
+#ifdef CONFIG_SCHED_SMT
+	/*
+	 * The sched_smt_present static key needs to be evaluated on every
+	 * hotplug event because at boot time SMT might be disabled when
+	 * the number of booted CPUs is limited.
+	 *
+	 * If then later a sibling gets hotplugged, then the key would stay
+	 * off and SMT scheduling would never be functional.
+	 */
+	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
+		static_branch_enable_cpuslocked(&sched_smt_present);
+#endif
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -5874,22 +5886,6 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
-	/*
-	 * We've enumerated all CPUs and will assume that if any CPU
-	 * has SMT siblings, CPU0 will too.
-	 */
-	if (cpumask_weight(cpu_smt_mask(0)) > 1)
-		static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
 void __init sched_init_smp(void)
 {
 	sched_init_numa();
@@ -5911,8 +5907,6 @@ void __init sched_init_smp(void)
 	init_sched_rt_class();
 	init_sched_dl_class();
 
-	sched_init_smt();
-
 	sched_smp_initialized = true;
 }
 
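Note (not part of the patch above): with this change the key is flipped from the CPU hotplug path instead of once at SMP init, which is why the _cpuslocked variant is used, since sched_cpu_activate() already runs with the CPU hotplug lock held. For context, a minimal sketch of the consumer side follows; the helper name smt_scheduling_active() is hypothetical and chosen for illustration, while the key itself is the one defined by the scheduler core.

#include <linux/jump_label.h>

/* Declared extern here; the scheduler core owns the definition. */
DECLARE_STATIC_KEY_FALSE(sched_smt_present);

/*
 * Hypothetical fast-path helper: static_branch_likely() compiles to a
 * patched NOP while the key is off, so SMT-only code costs nothing on
 * machines (or boot configurations) without SMT siblings.
 */
static inline bool smt_scheduling_active(void)
{
	return static_branch_likely(&sched_smt_present);
}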