@@ -702,29 +702,6 @@ static int perf_evlist__resume(struct perf_evlist *evlist)
 	return perf_evlist__set_paused(evlist, false);
 }
 
-union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
-{
-	struct perf_mmap *md = &evlist->mmap[idx];
-
-	/*
-	 * Check messup is required for forward overwritable ring buffer:
-	 * memory pointed by md->prev can be overwritten in this case.
-	 * No need for read-write ring buffer: kernel stop outputting when
-	 * it hit md->prev (perf_mmap__consume()).
-	 */
-	return perf_mmap__read_forward(md);
-}
-
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
-{
-	return perf_evlist__mmap_read_forward(evlist, idx);
-}
-
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__consume(&evlist->mmap[idx], false);
-}
-
 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
@@ -761,7 +738,7 @@ static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 		map[i].fd = -1;
 		/*
 		 * When the perf_mmap() call is made we grab one refcount, plus
-		 * one extra to let perf_evlist__mmap_consume() get the last
+		 * one extra to let perf_mmap__consume() get the last
 		 * events after all real references (perf_mmap__get()) are
 		 * dropped.
 		 *