@@ -6788,6 +6788,11 @@ out_unlock:
 	return 0;
 }
 
+static inline int on_null_domain(struct rq *rq)
+{
+	return unlikely(!rcu_dereference_sched(rq->sd));
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
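
Besides moving on_null_domain() above the CONFIG_NO_HZ_COMMON block so the nohz
code below can call it, the hunk wraps the test in unlikely(). For reference, a
minimal sketch of the hint macros involved, per their simple (non-branch-profiling)
forms in include/linux/compiler.h; they feed gcc's __builtin_expect() so the
compiler can lay the cold path out of line:

	# define likely(x)	__builtin_expect(!!(x), 1)
	# define unlikely(x)	__builtin_expect(!!(x), 0)
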
@@ -6842,8 +6847,13 @@ static void nohz_balancer_kick(void)
|
|
static inline void nohz_balance_exit_idle(int cpu)
|
|
static inline void nohz_balance_exit_idle(int cpu)
|
|
{
|
|
{
|
|
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
|
if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
|
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
|
|
|
- atomic_dec(&nohz.nr_cpus);
|
|
|
|
|
|
+ /*
|
|
|
|
+ * Completely isolated CPUs don't ever set, so we must test.
|
|
|
|
+ */
|
|
|
|
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
|
|
|
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
|
|
|
+ atomic_dec(&nohz.nr_cpus);
|
|
|
|
+ }
|
|
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
|
clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
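
For readability, nohz_balance_exit_idle() as it reads with this hunk applied,
reconstructed from the hunk alone (nothing outside it changes):

	static inline void nohz_balance_exit_idle(int cpu)
	{
		if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
			/*
			 * Completely isolated CPUs don't ever set, so we must test.
			 */
			if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
				cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
				atomic_dec(&nohz.nr_cpus);
			}
			clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
		}
	}

Only a CPU whose bit is actually set in nohz.idle_cpus_mask now decrements
nohz.nr_cpus, which keeps the counter in sync with the mask once isolated CPUs
stop joining it.
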
@@ -6897,6 +6907,12 @@ void nohz_balance_enter_idle(int cpu)
 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
 		return;
 
+	/*
+	 * If we're a completely isolated CPU, we don't play.
+	 */
+	if (on_null_domain(cpu_rq(cpu)))
+		return;
+
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
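
Likewise, the tail of nohz_balance_enter_idle() with this hunk applied (the
lines of the function above the hunk are unchanged and elided here):

	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
		return;

	/*
	 * If we're a completely isolated CPU, we don't play.
	 */
	if (on_null_domain(cpu_rq(cpu)))
		return;

	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
	atomic_inc(&nohz.nr_cpus);
	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));

Because the on_null_domain() check runs before all three bookkeeping steps, a
completely isolated CPU never sets NOHZ_TICK_STOPPED, never appears in
nohz.idle_cpus_mask, and never bumps nohz.nr_cpus, so nohz idle balancing
leaves it alone.
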
@@ -7159,11 +7175,6 @@ static void run_rebalance_domains(struct softirq_action *h)
 		nohz_idle_balance(this_rq, idle);
 }
 
-static inline int on_null_domain(struct rq *rq)
-{
-	return !rcu_dereference_sched(rq->sd);
-}
-
 /*
  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  */