@@ -3060,17 +3060,6 @@ done:
 	return rotate;
 }
 
-#ifdef CONFIG_NO_HZ_FULL
-bool perf_event_can_stop_tick(void)
-{
-	if (atomic_read(&nr_freq_events) ||
-	    __this_cpu_read(perf_throttled_count))
-		return false;
-	else
-		return true;
-}
-#endif
-
 void perf_event_task_tick(void)
 {
 	struct list_head *head = this_cpu_ptr(&active_ctx_list);
@@ -3081,6 +3070,7 @@ void perf_event_task_tick(void)
 
 	__this_cpu_inc(perf_throttled_seq);
 	throttled = __this_cpu_xchg(perf_throttled_count, 0);
+	tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 
 	list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
 		perf_adjust_freq_unthr_context(ctx, throttled);
@@ -3511,6 +3501,28 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static DEFINE_SPINLOCK(nr_freq_lock);
+#endif
+
+static void unaccount_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	spin_lock(&nr_freq_lock);
+	if (atomic_dec_and_test(&nr_freq_events))
+		tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void unaccount_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		unaccount_freq_event_nohz();
+	else
+		atomic_dec(&nr_freq_events);
+}
+
 static void unaccount_event(struct perf_event *event)
 {
 	bool dec = false;
@@ -3527,7 +3539,7 @@ static void unaccount_event(struct perf_event *event)
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
 	if (event->attr.freq)
-		atomic_dec(&nr_freq_events);
+		unaccount_freq_event();
 	if (event->attr.context_switch) {
 		dec = true;
 		atomic_dec(&nr_switch_events);
@@ -6349,9 +6361,9 @@ static int __perf_event_overflow(struct perf_event *event,
 		if (unlikely(throttle
 			     && hwc->interrupts >= max_samples_per_tick)) {
 			__this_cpu_inc(perf_throttled_count);
+			tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
 			hwc->interrupts = MAX_INTERRUPTS;
 			perf_log_throttle(event, 0);
-			tick_nohz_full_kick();
 			ret = 1;
 		}
 	}
@@ -7741,6 +7753,27 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
 }
 
+/* Freq events need the tick to stay alive (see perf_event_task_tick). */
+static void account_freq_event_nohz(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	/* Lock so we don't race with concurrent unaccount */
+	spin_lock(&nr_freq_lock);
+	if (atomic_inc_return(&nr_freq_events) == 1)
+		tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
+	spin_unlock(&nr_freq_lock);
+#endif
+}
+
+static void account_freq_event(void)
+{
+	if (tick_nohz_full_enabled())
+		account_freq_event_nohz();
+	else
+		atomic_inc(&nr_freq_events);
+}
+
+
 static void account_event(struct perf_event *event)
 {
 	bool inc = false;
@@ -7756,10 +7789,8 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (event->attr.freq) {
-		if (atomic_inc_return(&nr_freq_events) == 1)
-			tick_nohz_full_kick_all();
-	}
+	if (event->attr.freq)
+		account_freq_event();
 	if (event->attr.context_switch) {
 		atomic_inc(&nr_switch_events);
 		inc = true;
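
For context, here is a minimal user-space sketch of the accounting pattern the hunks above introduce: a shared counter of freq events whose 0<->1 transitions set or clear a tick-dependency bit, with a lock serializing each transition against a concurrent account/unaccount. All names below (model_tick_dep_set(), model_account_freq_event(), MODEL_DEP_BIT_PERF, ...) are illustrative stand-ins, not the kernel API used in the patch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for the tick dependency bit; not kernel API. */
static atomic_int tick_dep_mask;
#define MODEL_DEP_BIT_PERF 0x1

static void model_tick_dep_set(void)
{
	atomic_fetch_or(&tick_dep_mask, MODEL_DEP_BIT_PERF);
}

static void model_tick_dep_clear(void)
{
	atomic_fetch_and(&tick_dep_mask, ~MODEL_DEP_BIT_PERF);
}

/* Counter of active freq events, as in nr_freq_events. */
static atomic_int nr_freq_events;
/* Serializes the 0<->1 transition against a concurrent unaccount. */
static pthread_mutex_t nr_freq_lock = PTHREAD_MUTEX_INITIALIZER;

static void model_account_freq_event(void)
{
	pthread_mutex_lock(&nr_freq_lock);
	/* Only the first freq event needs to install the dependency. */
	if (atomic_fetch_add(&nr_freq_events, 1) == 0)
		model_tick_dep_set();
	pthread_mutex_unlock(&nr_freq_lock);
}

static void model_unaccount_freq_event(void)
{
	pthread_mutex_lock(&nr_freq_lock);
	/* Only the last freq event going away may drop the dependency. */
	if (atomic_fetch_sub(&nr_freq_events, 1) == 1)
		model_tick_dep_clear();
	pthread_mutex_unlock(&nr_freq_lock);
}

int main(void)
{
	model_account_freq_event();
	printf("after account:   dep mask = %d\n", atomic_load(&tick_dep_mask));
	model_unaccount_freq_event();
	printf("after unaccount: dep mask = %d\n", atomic_load(&tick_dep_mask));
	return 0;
}

Without the lock, a thread could observe the counter transition, be delayed before touching the dependency bit, and have a concurrent transition in the other direction toggle the bit first, leaving the bit inconsistent with the counter. That appears to be the race the patch's nr_freq_lock and its "don't race with concurrent unaccount" comment guard against.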