@@ -200,8 +200,6 @@ struct timer_base {
 	unsigned long		clk;
 	unsigned long		next_expiry;
 	unsigned int		cpu;
-	bool			migration_enabled;
-	bool			nohz_active;
 	bool			is_idle;
 	bool			must_forward_clk;
 	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
|
@@ -210,45 +208,57 @@ struct timer_base {
 
 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
+
+DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_MUTEX(timer_keys_mutex);
+
+static void timer_update_keys(struct work_struct *work);
+static DECLARE_WORK(timer_update_work, timer_update_keys);
+
+#ifdef CONFIG_SMP
 unsigned int sysctl_timer_migration = 1;
 
-void timers_update_migration(bool update_nohz)
+DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
+
+static void timers_update_migration(void)
 {
-	bool on = sysctl_timer_migration && tick_nohz_active;
-	unsigned int cpu;
+	if (sysctl_timer_migration && tick_nohz_active)
+		static_branch_enable(&timers_migration_enabled);
+	else
+		static_branch_disable(&timers_migration_enabled);
+}
+#else
+static inline void timers_update_migration(void) { }
+#endif /* !CONFIG_SMP */
 
-	/* Avoid the loop, if nothing to update */
-	if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
-		return;
+static void timer_update_keys(struct work_struct *work)
+{
+	mutex_lock(&timer_keys_mutex);
+	timers_update_migration();
+	static_branch_enable(&timers_nohz_active);
+	mutex_unlock(&timer_keys_mutex);
+}
 
-	for_each_possible_cpu(cpu) {
-		per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
-		per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
-		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
-		if (!update_nohz)
-			continue;
-		per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
-		per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
-		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
-	}
+void timers_update_nohz(void)
+{
+	schedule_work(&timer_update_work);
 }
 
 int timer_migration_handler(struct ctl_table *table, int write,
 			    void __user *buffer, size_t *lenp,
 			    loff_t *ppos)
 {
-	static DEFINE_MUTEX(mutex);
 	int ret;
 
-	mutex_lock(&mutex);
+	mutex_lock(&timer_keys_mutex);
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (!ret && write)
-		timers_update_migration(false);
-	mutex_unlock(&mutex);
+		timers_update_migration();
+	mutex_unlock(&timer_keys_mutex);
 	return ret;
 }
-#endif
+#endif /* NO_HZ_COMMON */
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
 					  bool force_up)
|
|
@@ -534,7 +544,7 @@ __internal_add_timer(struct timer_base *base, struct timer_list *timer)
 static void
 trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
 {
-	if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+	if (!is_timers_nohz_active())
 		return;
 
 	/*
|
@@ -849,21 +859,20 @@ static inline struct timer_base *get_timer_base(u32 tflags)
 	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
 }
 
-#ifdef CONFIG_NO_HZ_COMMON
 static inline struct timer_base *
 get_target_base(struct timer_base *base, unsigned tflags)
 {
-#ifdef CONFIG_SMP
-	if ((tflags & TIMER_PINNED) || !base->migration_enabled)
-		return get_timer_this_cpu_base(tflags);
-	return get_timer_cpu_base(tflags, get_nohz_timer_target());
-#else
-	return get_timer_this_cpu_base(tflags);
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+	if (static_branch_likely(&timers_migration_enabled) &&
+	    !(tflags & TIMER_PINNED))
+		return get_timer_cpu_base(tflags, get_nohz_timer_target());
 #endif
+	return get_timer_this_cpu_base(tflags);
 }
 
 static inline void forward_timer_base(struct timer_base *base)
 {
+#ifdef CONFIG_NO_HZ_COMMON
 	unsigned long jnow;
 
 	/*
|
|
@@ -887,16 +896,8 @@ static inline void forward_timer_base(struct timer_base *base)
 		base->clk = jnow;
 	else
 		base->clk = base->next_expiry;
-}
-#else
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
-	return get_timer_this_cpu_base(tflags);
-}
-
-static inline void forward_timer_base(struct timer_base *base) { }
 #endif
+}
 
 /*