|
@@ -355,6 +355,56 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 	return 0;
 }
 
+/* Enable 'evsel' on every thread of the single mmapped cpu 'cpu'. */
+static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
+					 struct perf_evsel *evsel, int cpu)
+{
+	int thread, err;
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (thread = 0; thread < nr_threads; thread++) {
+		err = ioctl(FD(evsel, cpu, thread),
+			    PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/* Enable 'evsel' on every cpu for the single mmapped thread 'thread'. */
+static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
+					    struct perf_evsel *evsel,
+					    int thread)
+{
+	int cpu, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/* Enable the event at mmap index 'idx': a cpu index when mmaps are per-cpu, a thread index otherwise. */
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+				  struct perf_evsel *evsel, int idx)
+{
+	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+
+	if (per_cpu_mmaps)
+		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
+	else
+		return perf_evlist__enable_event_thread(evlist, evsel, idx);
+}
+
 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
 	int nr_cpus = cpu_map__nr(evlist->cpus);
|