libperf: Introduce perf_evlist_mmap_ops::mmap callback

Add the perf_evlist_mmap_ops::mmap callback, which is called from
mmap_per_evsel() to actually mmap the map.

Add perf_evlist__mmap_cb_mmap() as libperf's own mmap callback.

A newly mmaped map gets its refcount set to 2 in mmap__mmap(); we follow
that in the mmap callback. We will move this to a common place after we
switch to perf_evlist__mmap().
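
To make the two-reference scheme concrete, here is a standalone, hedged
sketch: plain C11 atomics stand in for the kernel's refcount API, and none
of the names below are libperf code.

#include <stdatomic.h>
#include <stdio.h>

struct mmap_sketch {
	atomic_int refcnt;
	int empty;		/* stands in for "ring buffer drained" */
};

static void put(struct mmap_sketch *m)
{
	/* last reference gone: this is where munmap would happen */
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		printf("munmap now\n");
}

static void consume(struct mmap_sketch *m)
{
	/*
	 * Mirrors the comment in perf_evlist__mmap_cb_mmap(): the consumer
	 * drops the last reference only once every event has been read out
	 * of the ring buffer.
	 */
	if (atomic_load(&m->refcnt) == 1 && m->empty)
		put(m);
}

int main(void)
{
	struct mmap_sketch m = { .empty = 0 };

	atomic_store(&m.refcnt, 2);	/* as in the mmap callback */
	put(&m);		/* the evlist side drops its reference (POLLHUP) */
	m.empty = 1;
	consume(&m);		/* consumer drains the buffer, map goes away */
	return 0;
}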

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-18-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Jiri Olsa 2019-10-07 14:53:25 +02:00, committed by Arnaldo Carvalho de Melo
Parent 3a8bb58121
Commit b5911e7ac2
2 changed files with 29 additions and 3 deletions


@@ -358,6 +358,28 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
 
 #define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
 
+static int
+perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
+			  int output, int cpu)
+{
+	/*
+	 * The last one will be done at perf_mmap__consume(), so that we
+	 * make sure we don't prevent tools from consuming every last event in
+	 * the ring buffer.
+	 *
+	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+	 * anymore, but the last events for it are still in the ring buffer,
+	 * waiting to be consumed.
+	 *
+	 * Tools can choose to ignore this at their own discretion, but the
+	 * evlist layer can't just drop it when filtering events in
+	 * perf_evlist__filter_pollfd().
+	 */
+	refcount_set(&map->refcnt, 2);
+
+	return perf_mmap__mmap(map, mp, output, cpu);
+}
+
 static int
 mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 	       int idx, struct perf_mmap_param *mp, int cpu_idx,
@@ -396,7 +418,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
 		if (*output == -1) {
 			*output = fd;
 
-			if (perf_mmap__mmap(map, mp, *output, evlist_cpu) < 0)
+			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
 				return -1;
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ -488,7 +510,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 	const struct perf_cpu_map *cpus = evlist->cpus;
 	const struct perf_thread_map *threads = evlist->threads;
 
-	if (!ops || !ops->get)
+	if (!ops || !ops->get || !ops->mmap)
 		return -EINVAL;
 
 	if (!evlist->mmap)
@@ -513,7 +535,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
 {
 	struct perf_mmap_param mp;
 	struct perf_evlist_mmap_ops ops = {
-		.get = perf_evlist__mmap_cb_get,
+		.get  = perf_evlist__mmap_cb_get,
+		.mmap = perf_evlist__mmap_cb_mmap,
 	};
 
 	evlist->mmap_len = (pages + 1) * page_size;
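
For context, a hedged sketch of how a libperf user could plug its own mmap
callback into perf_evlist__mmap_ops() once both callbacks are mandatory. All
my_* names are made up, and the three-argument perf_evlist__mmap_ops(evlist,
&ops, mp) form is assumed from libperf at the time; only the callback
signatures come from this patch.

#include <internal/evlist.h>	/* struct perf_evlist_mmap_ops */
#include <internal/mmap.h>	/* struct perf_mmap, perf_mmap__mmap() */
#include <linux/refcount.h>	/* refcount_set() */

static struct perf_mmap *my_maps;	/* tool-owned array, allocated elsewhere */

/* Hypothetical ->get callback: hand out the map slot for 'idx'. */
static struct perf_mmap *
my_get_cb(struct perf_evlist *evlist, bool overwrite, int idx)
{
	return &my_maps[idx];
}

/* Hypothetical ->mmap callback: tool-specific setup, then the plain mmap. */
static int
my_mmap_cb(struct perf_mmap *map, struct perf_mmap_param *mp,
	   int output, int cpu)
{
	/* keep the two-reference scheme described in the commit message */
	refcount_set(&map->refcnt, 2);
	return perf_mmap__mmap(map, mp, output, cpu);
}

int my_mmap_evlist(struct perf_evlist *evlist, struct perf_mmap_param *mp)
{
	struct perf_evlist_mmap_ops ops = {
		.get  = my_get_cb,
		.mmap = my_mmap_cb,
	};

	return perf_evlist__mmap_ops(evlist, &ops, mp);
}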


@@ -31,10 +31,13 @@ typedef void
 (*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
 typedef struct perf_mmap*
 (*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
+typedef int
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
 
 struct perf_evlist_mmap_ops {
 	perf_evlist_mmap__cb_idx_t	idx;
 	perf_evlist_mmap__cb_get_t	get;
+	perf_evlist_mmap__cb_mmap_t	mmap;
 };
 
 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
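
One consequence of the new !ops->mmap check in perf_evlist__mmap_ops() is
that a partially filled ops struct now fails up front instead of
dereferencing a NULL callback later in mmap_per_evsel(). A hypothetical
caller (my_get_cb, evlist and mp as in the sketch above):

	struct perf_evlist_mmap_ops ops = {
		.get = my_get_cb,	/* ->mmap accidentally left NULL */
	};

	/* rejected early with -EINVAL rather than crashing later */
	int err = perf_evlist__mmap_ops(evlist, &ops, mp);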