@@ -15,6 +15,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
+#include "asm/bug.h"
 #include <unistd.h>
 
 #include "parse-events.h"
@@ -27,8 +28,8 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+static void perf_mmap__munmap(struct perf_mmap *map);
+static void perf_mmap__put(struct perf_mmap *map);
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -44,7 +45,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 	perf_evlist__set_maps(evlist, cpus, threads);
 	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
-	evlist->backward = false;
+	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -122,6 +123,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 	fdarray__exit(&evlist->pollfd);
 }
@@ -465,7 +467,8 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 	return 0;
 }
 
-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx, short revent)
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+				     struct perf_mmap *map, short revent)
 {
 	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
 	/*
@@ -473,7 +476,7 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
 	 * close the associated evlist->mmap[] entry.
 	 */
 	if (pos >= 0) {
-		evlist->pollfd.priv[pos].idx = idx;
+		evlist->pollfd.priv[pos].ptr = map;
 
 		fcntl(fd, F_SETFL, O_NONBLOCK);
 	}
@@ -483,15 +486,16 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
 
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 {
-	return __perf_evlist__add_pollfd(evlist, fd, -1, POLLIN);
+	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
 }
 
 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					  void *arg __maybe_unused)
 {
-	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+	struct perf_mmap *map = fda->priv[fd].ptr;
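+	/* Entries added via perf_evlist__add_pollfd() carry a NULL map. */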
 
-	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+	if (map)
+		perf_mmap__put(map);
 }
 
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
@@ -688,8 +692,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
 	int i;
 
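+	/* Only backward ring buffers can be paused and resumed. */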
+	if (!evlist->backward_mmap)
+		return 0;
+
 	for (i = 0; i < evlist->nr_mmaps; i++) {
-		int fd = evlist->mmap[i].fd;
+		int fd = evlist->backward_mmap[i].fd;
 		int err;
 
 		if (fd < 0)
@@ -701,12 +708,12 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 	return 0;
 }
 
-int perf_evlist__pause(struct perf_evlist *evlist)
+static int perf_evlist__pause(struct perf_evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, true);
 }
 
-int perf_evlist__resume(struct perf_evlist *evlist)
+static int perf_evlist__resume(struct perf_evlist *evlist)
 {
 	return perf_evlist__set_paused(evlist, false);
 }
@@ -781,9 +788,8 @@ broken_event:
 	return event;
 }
 
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
 	u64 head;
 	u64 old = md->prev;
 
@@ -795,13 +801,12 @@ union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int
 
 	head = perf_mmap__read_head(md);
 
-	return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
+	return perf_mmap__read(md, check_messup, old, head, &md->prev);
 }
 
 union perf_event *
-perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+perf_mmap__read_backward(struct perf_mmap *md)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
 	u64 head, end;
 	u64 start = md->prev;
 
@@ -836,16 +841,38 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int
 	return perf_mmap__read(md, false, start, end, &md->prev);
 }
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 {
-	if (!evlist->backward)
-		return perf_evlist__mmap_read_forward(evlist, idx);
-	return perf_evlist__mmap_read_backward(evlist, idx);
+	struct perf_mmap *md = &evlist->mmap[idx];
+
+	/*
+	 * Checking for messup is required with a forward overwritable ring
+	 * buffer: the memory pointed to by md->prev may be overwritten.
+	 * A read-write ring buffer needs no check: the kernel stops
+	 * outputting when it reaches md->prev (perf_mmap__consume()).
+	 */
+	return perf_mmap__read_forward(md, evlist->overwrite);
 }
 
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
 {
 	struct perf_mmap *md = &evlist->mmap[idx];
+
+	/*
+	 * No need to check for messup with a backward ring buffer:
+	 * we can always read arbitrarily long data from it, as long
+	 * as it is paused before reading.
+	 */
+	return perf_mmap__read_backward(md);
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
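+	/*
+	 * The generic reader only covers forward ring buffers here;
+	 * backward ones are read via perf_evlist__mmap_read_backward().
+	 */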
|
|
|
+ return perf_evlist__mmap_read_forward(evlist, idx);
|
|
|
+}
|
|
|
+
|
|
|
+void perf_mmap__read_catchup(struct perf_mmap *md)
|
|
|
+{
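+	/* Catch md->prev up to the kernel's current head. */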
 	u64 head;
 
 	if (!atomic_read(&md->refcnt))
@@ -855,38 +882,44 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
 	md->prev = head;
 }
 
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__read_catchup(&evlist->mmap[idx]);
+}
+
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
 	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+static void perf_mmap__get(struct perf_mmap *map)
 {
-	atomic_inc(&evlist->mmap[idx].refcnt);
+	atomic_inc(&map->refcnt);
 }
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+static void perf_mmap__put(struct perf_mmap *md)
 {
-	struct perf_mmap *md = &evlist->mmap[idx];
-
 	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
 
 	if (atomic_dec_and_test(&md->refcnt))
-		__perf_evlist__munmap(evlist, idx);
+		perf_mmap__munmap(md);
 }
 
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
 {
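+	/*
+	 * In non-overwrite mode, advance the tail so the kernel knows
+	 * the space we just consumed can be reused.
+	 */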
-	struct perf_mmap *md = &evlist->mmap[idx];
-
-	if (!evlist->overwrite) {
+	if (!overwrite) {
 		u64 old = md->prev;
 
 		perf_mmap__write_tail(md, old);
 	}
 
 	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
-		perf_evlist__mmap_put(evlist, idx);
+		perf_mmap__put(md);
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
 }
 
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -917,44 +950,52 @@ void __weak auxtrace_mmap_params__set_idx(
 {
 }
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+static void perf_mmap__munmap(struct perf_mmap *map)
 {
-	if (evlist->mmap[idx].base != NULL) {
-		munmap(evlist->mmap[idx].base, evlist->mmap_len);
-		evlist->mmap[idx].base = NULL;
-		evlist->mmap[idx].fd = -1;
-		atomic_set(&evlist->mmap[idx].refcnt, 0);
+	if (map->base != NULL) {
+		munmap(map->base, perf_mmap__mmap_len(map));
+		map->base = NULL;
+		map->fd = -1;
+		atomic_set(&map->refcnt, 0);
 	}
-	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
+	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist)
+static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
 
-	if (evlist->mmap == NULL)
-		return;
+	if (evlist->mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);
 
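+	/* backward_mmap, when allocated, shares evlist->nr_mmaps. */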
-	for (i = 0; i < evlist->nr_mmaps; i++)
-		__perf_evlist__munmap(evlist, i);
+	if (evlist->backward_mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->backward_mmap[i]);
+}
+
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 }
 
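+/*
+ * The array is returned rather than installed directly, so a caller
+ * can use it as either evlist->mmap or evlist->backward_mmap.
+ */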
|
|
|
-static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
|
|
|
+static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
|
|
|
{
|
|
|
int i;
|
|
|
+ struct perf_mmap *map;
|
|
|
|
|
|
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
|
|
|
if (cpu_map__empty(evlist->cpus))
|
|
|
evlist->nr_mmaps = thread_map__nr(evlist->threads);
|
|
|
- evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
|
|
|
- if (!evlist->mmap)
|
|
|
- return -ENOMEM;
|
|
|
+ map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
|
|
|
+ if (!map)
|
|
|
+ return NULL;
|
|
|
|
|
|
for (i = 0; i < evlist->nr_mmaps; i++)
|
|
|
- evlist->mmap[i].fd = -1;
|
|
|
- return 0;
|
|
|
+ map[i].fd = -1;
|
|
|
+ return map;
|
|
|
}
 
 struct mmap_params {
@@ -963,8 +1004,8 @@ struct mmap_params {
 	struct auxtrace_mmap_params auxtrace_mp;
 };
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-			       struct mmap_params *mp, int fd)
+static int perf_mmap__mmap(struct perf_mmap *map,
+			   struct mmap_params *mp, int fd)
 {
 	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
@@ -979,21 +1020,21 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
-	atomic_set(&evlist->mmap[idx].refcnt, 2);
-	evlist->mmap[idx].prev = 0;
-	evlist->mmap[idx].mask = mp->mask;
-	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
-				      MAP_SHARED, fd, 0);
-	if (evlist->mmap[idx].base == MAP_FAILED) {
+	atomic_set(&map->refcnt, 2);
+	map->prev = 0;
+	map->mask = mp->mask;
+	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+			 MAP_SHARED, fd, 0);
+	if (map->base == MAP_FAILED) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
-		evlist->mmap[idx].base = NULL;
+		map->base = NULL;
 		return -1;
 	}
-	evlist->mmap[idx].fd = fd;
+	map->fd = fd;
 
-	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
-				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
+				&mp->auxtrace_mp, map->base, fd))
 		return -1;
 
 	return 0;
@@ -1003,23 +1044,36 @@ static bool
 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
 {
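+	/* Backward ring buffers are paused and read in one go, not polled. */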
-	if (evsel->overwrite)
+	if (evsel->attr.write_backward)
 		return false;
 	return true;
 }
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
-				       int thread, int *output)
+				       int thread, int *_output, int *_output_backward)
 {
 	struct perf_evsel *evsel;
 	int revent;
 
 	evlist__for_each_entry(evlist, evsel) {
+		struct perf_mmap *maps = evlist->mmap;
+		int *output = _output;
 		int fd;
 
-		if (evsel->overwrite != (evlist->overwrite && evlist->backward))
-			continue;
+		if (evsel->attr.write_backward) {
+			output = _output_backward;
+			maps = evlist->backward_mmap;
+
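+			/*
+			 * Allocate backward ring buffers lazily, when the
+			 * first write_backward evsel is mapped.
+			 */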
+			if (!maps) {
+				maps = perf_evlist__alloc_mmap(evlist);
+				if (!maps)
+					return -1;
+				evlist->backward_mmap = maps;
+				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+			}
+		}
 
 		if (evsel->system_wide && thread)
 			continue;
@@ -1028,13 +1082,14 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 
 		if (*output == -1) {
 			*output = fd;
-			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
+
+			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
 
-			perf_evlist__mmap_get(evlist, idx);
+			perf_mmap__get(&maps[idx]);
 		}
 
 		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
@@ -1047,8 +1102,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
		 * Therefore don't add it for polling.
		 */
 		if (!evsel->system_wide &&
-		    __perf_evlist__add_pollfd(evlist, fd, idx, revent) < 0) {
-			perf_evlist__mmap_put(evlist, idx);
+		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+			perf_mmap__put(&maps[idx]);
 			return -1;
 		}
 
@@ -1074,13 +1129,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per cpu\n");
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);
 
 		for (thread = 0; thread < nr_threads; thread++) {
 			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-							thread, &output))
+							thread, &output, &output_backward))
 				goto out_unmap;
 		}
 	}
@@ -1088,8 +1144,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	return 0;
 
 out_unmap:
-	for (cpu = 0; cpu < nr_cpus; cpu++)
-		__perf_evlist__munmap(evlist, cpu);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
 
@@ -1102,20 +1157,20 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	pr_debug2("perf event ring buffer mmapped per thread\n");
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
+		int output_backward = -1;
 
 		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);
 
 		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-						&output))
+						&output, &output_backward))
 			goto out_unmap;
 	}
 
 	return 0;
 
 out_unmap:
-	for (thread = 0; thread < nr_threads; thread++)
-		__perf_evlist__munmap(evlist, thread);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
 
@@ -1248,7 +1303,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
 	};
 
-	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+	if (!evlist->mmap)
+		evlist->mmap = perf_evlist__alloc_mmap(evlist);
+	if (!evlist->mmap)
 		return -ENOMEM;
 
 	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
@@ -1919,3 +1976,61 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
 
 	return NULL;
 }
+
+void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
+				  enum bkw_mmap_state state)
+{
+	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
+	enum action {
+		NONE,
+		PAUSE,
+		RESUME,
+	} action = NONE;
+
+	if (!evlist->backward_mmap)
+		return;
+
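+	/*
+	 * Legal transitions follow the ring buffer life cycle:
+	 *   NOTREADY     -> RUNNING      (buffers mapped)
+	 *   RUNNING      -> DATA_PENDING (paused before reading)
+	 *   DATA_PENDING -> EMPTY        (all data consumed)
+	 *   EMPTY        -> RUNNING      (recording resumed)
+	 */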
+	switch (old_state) {
+	case BKW_MMAP_NOTREADY: {
+		if (state != BKW_MMAP_RUNNING)
+			goto state_err;
+		break;
+	}
+	case BKW_MMAP_RUNNING: {
+		if (state != BKW_MMAP_DATA_PENDING)
+			goto state_err;
+		action = PAUSE;
+		break;
+	}
+	case BKW_MMAP_DATA_PENDING: {
+		if (state != BKW_MMAP_EMPTY)
+			goto state_err;
+		break;
+	}
+	case BKW_MMAP_EMPTY: {
+		if (state != BKW_MMAP_RUNNING)
+			goto state_err;
+		action = RESUME;
+		break;
+	}
+	default:
+		WARN_ONCE(1, "Shouldn't get here\n");
+	}
+
+	evlist->bkw_mmap_state = state;
+
+	switch (action) {
+	case PAUSE:
+		perf_evlist__pause(evlist);
+		break;
+	case RESUME:
+		perf_evlist__resume(evlist);
+		break;
+	case NONE:
+	default:
+		break;
+	}
+
+state_err:
+	return;
+}