@@ -7060,25 +7060,12 @@ static void perf_log_itrace_start(struct perf_event *event)
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-				   int throttle, struct perf_sample_data *data,
-				   struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
 	struct hw_perf_event *hwc = &event->hw;
-	u64 seq;
 	int ret = 0;
-
-	/*
-	 * Non-sampling counters might still use the PMI to fold short
-	 * hardware counters, ignore those.
-	 */
-	if (unlikely(!is_sampling_event(event)))
-		return 0;
+	u64 seq;
 
 	seq = __this_cpu_read(perf_throttled_seq);
 	if (seq != hwc->interrupts_seq) {
@@ -7106,6 +7093,34 @@ static int __perf_event_overflow(struct perf_event *event,
 			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
+	return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+	return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+				   int throttle, struct perf_sample_data *data,
+				   struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	int ret = 0;
+
+	/*
+	 * Non-sampling counters might still use the PMI to fold short
+	 * hardware counters, ignore those.
+	 */
+	if (unlikely(!is_sampling_event(event)))
+		return 0;
+
+	ret = __perf_event_account_interrupt(event, throttle);
+
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events