perf evlist: Introduce backward_mmap array for evlist

Add backward_mmap to evlist and free it together with the normal mmap.

Improve perf_evlist__pick_pc() to search backward_mmap when evlist->mmap
is not available.

This patch doesn't allocate the array. It will be allocated conditionally
in the following commits.
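
For context, a rough sketch of how a follow-up change could conditionally
allocate the new array; the helper name and the on-demand call site are
assumptions for illustration, not part of this patch:

	/*
	 * Hypothetical sketch: allocate the backward ring buffer array on
	 * demand, reusing the existing perf_evlist__alloc_mmap() allocator
	 * so backward_mmap gets the same nr_mmaps sizing as evlist->mmap.
	 */
	static int perf_evlist__alloc_backward_mmap(struct perf_evlist *evlist)
	{
		if (evlist->backward_mmap)
			return 0;	/* already allocated */

		evlist->backward_mmap = perf_evlist__alloc_mmap(evlist);
		if (!evlist->backward_mmap)
			return -ENOMEM;

		return 0;
	}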

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-8-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Author: Wang Nan, 2016-07-14 08:34:39 +00:00, committed by Arnaldo Carvalho de Melo
Parent: a1f7261834
Commit: b2cb615d8a
3 changed files with 16 additions and 7 deletions

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -509,7 +509,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
 	if (!evlist)
 		return 0;
 
-	maps = evlist->mmap;
+	maps = backward ? evlist->backward_mmap : evlist->mmap;
 	if (!maps)
 		return 0;
 
@@ -696,8 +696,12 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
 static const struct perf_event_mmap_page *
 perf_evlist__pick_pc(struct perf_evlist *evlist)
 {
-	if (evlist && evlist->mmap && evlist->mmap[0].base)
-		return evlist->mmap[0].base;
+	if (evlist) {
+		if (evlist->mmap && evlist->mmap[0].base)
+			return evlist->mmap[0].base;
+		if (evlist->backward_mmap && evlist->backward_mmap[0].base)
+			return evlist->backward_mmap[0].base;
+	}
 	return NULL;
 }
 

diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -123,6 +123,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 	fdarray__exit(&evlist->pollfd);
 }
 
@@ -973,17 +974,20 @@ static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
 
-	if (evlist->mmap == NULL)
-		return;
-
-	for (i = 0; i < evlist->nr_mmaps; i++)
-		perf_mmap__munmap(&evlist->mmap[i]);
+	if (evlist->mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->mmap[i]);
+
+	if (evlist->backward_mmap)
+		for (i = 0; i < evlist->nr_mmaps; i++)
+			perf_mmap__munmap(&evlist->backward_mmap[i]);
 }
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
 	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
+	zfree(&evlist->backward_mmap);
 }
 
 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)

diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -61,6 +61,7 @@ struct perf_evlist {
 	} workload;
 	struct fdarray pollfd;
 	struct perf_mmap *mmap;
+	struct perf_mmap *backward_mmap;
 	struct thread_map *threads;
 	struct cpu_map *cpus;
 	struct perf_evsel *selected;