@@ -1207,16 +1207,27 @@ int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
 
 int perf_evsel__enable(struct perf_evsel *evsel)
 {
-	return perf_evsel__run_ioctl(evsel,
-				     PERF_EVENT_IOC_ENABLE,
-				     0);
+	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
+
+	if (!err)
+		evsel->disabled = false;
+
+	return err;
 }
 
 int perf_evsel__disable(struct perf_evsel *evsel)
 {
-	return perf_evsel__run_ioctl(evsel,
-				     PERF_EVENT_IOC_DISABLE,
-				     0);
+	int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
+	/*
+	 * We mark it disabled here so that tools that disable a event can
+	 * ignore events after they disable it. I.e. the ring buffer may have
+	 * already a few more events queued up before the kernel got the stop
+	 * request.
+	 */
+	if (!err)
+		evsel->disabled = true;
+
+	return err;
 }
 
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)