@@ -2092,8 +2092,18 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
 				union perf_event *event __maybe_unused,
 				struct perf_sample *sample)
 {
-	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+	struct thread *thread;
 	int callchain_ret = 0;
+	/*
+	 * Check if we called perf_evsel__disable(evsel) due to, for instance,
+	 * this event's max_events having been hit and this is an entry coming
+	 * from the ring buffer that we should discard, since the max events
+	 * have already been considered/printed.
+	 */
+	if (evsel->disabled)
+		return 0;
+
+	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 
 	if (sample->callchain) {
 		callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
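
The early return above guards against a race inherent to disabling an event:
samples that were already queued in the mmap ring buffer when
perf_evsel__disable() ran are still delivered afterwards, so the handler has
to drop them. A minimal sketch of that discard pattern, using hypothetical
names rather than the real perf types:

	#include <stdbool.h>

	struct event_source {
		bool disabled;	/* set once this event's budget is spent */
	};

	/* Drop entries that were queued before the source was disabled. */
	static int handle_event(struct event_source *src)
	{
		if (src->disabled)
			return 0;	/* stale ring-buffer entry, discard */
		/* ... resolve the thread and print the sample ... */
		return 0;
	}
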
@@ -2142,6 +2152,11 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
 					  sample->raw_data, sample->raw_size,
 					  trace->output);
 		++trace->nr_events_printed;
+
+		if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
+			perf_evsel__disable(evsel);
+			perf_evsel__close(evsel);
+		}
 	}
 }
 
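
This hunk is where the per-event cap takes effect: after printing a sample,
the per-evsel counter is bumped, and once it reaches max_events the event is
disabled and its file descriptors closed. A sketch of that accounting, with
hypothetical types standing in for the perf ones:

	#include <limits.h>
	#include <stdbool.h>

	struct counting_source {
		unsigned long max_events;	/* ULONG_MAX means "no cap" */
		unsigned long nr_events_printed;
		bool disabled;
	};

	static void account_printed(struct counting_source *src)
	{
		if (src->max_events != ULONG_MAX &&
		    ++src->nr_events_printed == src->max_events) {
			src->disabled = true;	/* stands in for perf_evsel__disable() */
			/* perf also closes the fds here, which is what makes
			 * poll() report POLLNVAL in the hunk below */
		}
	}
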
@@ -2726,7 +2741,7 @@ next_event:
 		int timeout = done ? 100 : -1;
 
 		if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
-			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
+			if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
 				draining = true;
 
 			goto again;
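
The POLLNVAL addition is the other half of the story: perf_evsel__close()
above closes file descriptors that are still registered in the pollfd array,
so poll() flags those entries POLLNVAL, and they must be filtered out like
POLLERR/POLLHUP entries for the loop to notice when no live fds remain. A
sketch of that compaction, assuming a plain pollfd array rather than the real
perf_evlist__filter_pollfd() API:

	#include <poll.h>

	/* Compact away pollfd entries whose revents match the given mask;
	 * returns how many live entries remain. */
	static int filter_pollfd(struct pollfd *fds, int nr, short mask)
	{
		int i, kept = 0;

		for (i = 0; i < nr; i++) {
			if (fds[i].revents & mask)
				continue;	/* dead or invalid fd, drop it */
			fds[kept++] = fds[i];
		}
		return kept;
	}

Called with POLLERR | POLLHUP | POLLNVAL, a return of zero means every fd is
gone, which is exactly the condition under which the loop sets draining.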