@@ -265,17 +265,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
 	return 0;
 }
 
+static int perf_evlist__nr_threads(struct perf_evlist *evlist,
+				   struct perf_evsel *evsel)
+{
+	if (evsel->system_wide)
+		return 1;
+	else
+		return thread_map__nr(evlist->threads);
+}
+
 void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_DISABLE, 0);
@@ -288,12 +298,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_ENABLE, 0);
@@ -305,12 +316,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return 0;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_DISABLE, 0);
 			if (err)
@@ -324,12 +337,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 			      struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_ENABLE, 0);
 			if (err)
@@ -343,7 +358,16 @@ static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 	int nr_threads = thread_map__nr(evlist->threads);
-	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
+	int nfds = 0;
+	struct perf_evsel *evsel;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		if (evsel->system_wide)
+			nfds += nr_cpus;
+		else
+			nfds += nr_cpus * nr_threads;
+	}
+
 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
 	return evlist->pollfd != NULL ? 0 : -ENOMEM;
 }
@@ -636,7 +660,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 	struct perf_evsel *evsel;
 
 	evlist__for_each(evlist, evsel) {
-		int fd = FD(evsel, cpu, thread);
+		int fd;
+
+		if (evsel->system_wide && thread)
+			continue;
+
+		fd = FD(evsel, cpu, thread);
 
 		if (*output == -1) {
 			*output = fd;