@@ -5357,7 +5357,7 @@ static inline bool test_idle_cores(int cpu, bool def)
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void update_idle_core(struct rq *rq)
+void __update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
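
The rename above is only half of the change: callers still invoke update_idle_core(), so this hunk presumably pairs with a static-key-gated inline wrapper elsewhere (conventionally kernel/sched/sched.h). That wrapper is not part of this excerpt; a minimal sketch, assuming the usual jump-label pattern, would be:

extern struct static_key_false sched_smt_present;
extern void __update_idle_core(struct rq *rq);

/*
 * Sketch only, not from this hunk: the wrapper keeps the call site cheap on
 * non-SMT machines by hiding __update_idle_core() behind a patched-out branch.
 */
static inline void update_idle_core(struct rq *rq)
{
	if (static_branch_unlikely(&sched_smt_present))
		__update_idle_core(rq);
}
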
@@ -5389,6 +5389,9 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5422,6 +5425,9 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t
 {
 	int cpu;
 
+	if (!static_branch_likely(&sched_smt_present))
+		return -1;
+
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
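
Both scan functions now bail out early through static_branch_likely(&sched_smt_present), so on machines without SMT the check is a jump-label branch rather than a load and test. The key itself must be defined and enabled elsewhere; a hedged sketch, where the helper name sched_smt_update() and its placement in the CPU online path are assumptions rather than part of this patch, might look like:

DEFINE_STATIC_KEY_FALSE(sched_smt_present);

/*
 * Sketch only: assumed to run from the CPU online path. Once any CPU reports
 * more than one SMT sibling, the fast paths in select_idle_core() and
 * select_idle_smt() become active.
 */
static void sched_smt_update(int cpu)
{
	if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
		static_branch_enable(&sched_smt_present);
}
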