@@ -777,7 +777,7 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 	/*
 	 * Check if event was unmapped due to a POLLHUP/POLLERR.
 	 */
-	if (!atomic_read(&md->refcnt))
+	if (!refcount_read(&md->refcnt))
 		return NULL;
 
 	head = perf_mmap__read_head(md);
@@ -794,7 +794,7 @@ perf_mmap__read_backward(struct perf_mmap *md)
 	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
-	if (!atomic_read(&md->refcnt))
+	if (!refcount_read(&md->refcnt))
		return NULL;
 
	head = perf_mmap__read_head(md);
@@ -856,7 +856,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md)
 {
	u64 head;
 
-	if (!atomic_read(&md->refcnt))
+	if (!refcount_read(&md->refcnt))
		return;
 
	head = perf_mmap__read_head(md);
@@ -875,14 +875,14 @@ static bool perf_mmap__empty(struct perf_mmap *md)
 
 static void perf_mmap__get(struct perf_mmap *map)
 {
-	atomic_inc(&map->refcnt);
+	refcount_inc(&map->refcnt);
 }
 
 static void perf_mmap__put(struct perf_mmap *md)
 {
-	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
+	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);
 
-	if (atomic_dec_and_test(&md->refcnt))
+	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
 }
 
@@ -894,7 +894,7 @@ void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
		perf_mmap__write_tail(md, old);
	}
 
-	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
+	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
 }
 
@@ -937,7 +937,7 @@ static void perf_mmap__munmap(struct perf_mmap *map)
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
-		atomic_set(&map->refcnt, 0);
+		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
@@ -1001,7 +1001,7 @@ static int perf_mmap__mmap(struct perf_mmap *map,
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
-	atomic_set(&map->refcnt, 2);
+	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
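
For reference, the refcount_t API mirrors the atomic_t calls one-for-one (atomic_set/refcount_set, atomic_inc/refcount_inc, atomic_dec_and_test/refcount_dec_and_test), but additionally enforces the counter's lifetime invariants: it saturates instead of wrapping on overflow and warns on an increment from zero, turning potential use-after-free or double-free bugs into recoverable WARNs. Below is a minimal sketch of that pattern, outside the patch; struct obj, obj_new(), obj_get() and obj_put() are hypothetical names standing in for struct perf_mmap and its get/put helpers.

/*
 * Illustrative sketch, not part of the patch above: the plain
 * refcount_t lifecycle that the conversion adopts.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcnt;
};

static struct obj *obj_new(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		refcount_set(&o->refcnt, 1);	/* one initial reference */
	return o;
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->refcnt);	/* WARNs if refcnt was already 0 */
}

static void obj_put(struct obj *o)
{
	/* refcount_dec_and_test() returns true when the count hits 0 */
	if (refcount_dec_and_test(&o->refcnt))
		kfree(o);
}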