perf cpumap: Migrate to libperf cpumap api
Switch from directly accessing the perf_cpu_map to using the appropriate libperf API when possible. Using the API simplifies the job of refactoring use of perf_cpu_map.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: André Almeida <andrealmeid@collabora.com>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Garry <john.garry@huawei.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Miaoqian Lin <linmq006@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Shunsuke Nakamura <nakamura.shun@fujitsu.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Stephen Brennan <stephen.s.brennan@oracle.com>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Yury Norov <yury.norov@gmail.com>
Link: http://lore.kernel.org/lkml/20220122045811.3402706-3-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
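The conversion in the diff below is mechanical: direct reads of the perf_cpu_map fields (cpus->nr, cpus->map[idx]) become calls to the libperf accessors perf_cpu_map__nr() and perf_cpu_map__cpu(), or uses of the perf_cpu_map__for_each_cpu() iterator. A side benefit visible in the evsel__open() and evsel__open_per_cpu() hunks is that perf_cpu_map__nr() tolerates a NULL map (returning 1), so the open-coded "cpus ? cpus->nr : 1" guards can be dropped. A minimal standalone sketch of the accessor style follows; it is illustrative only, not part of the patch, and assumes libperf as of this series (struct perf_cpu wrapping the CPU number, perf_cpu_map__new(NULL) returning the online CPUs) and a hypothetical demo.c built with something like "cc demo.c -lperf":

	#include <stdio.h>
	#include <perf/cpumap.h>

	int main(void)
	{
		/* A NULL cpu list asks libperf for all online CPUs. */
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
		struct perf_cpu cpu;
		int idx;

		if (!cpus)
			return 1;

		/* Replaces direct reads of cpus->nr. */
		printf("%d online cpus\n", perf_cpu_map__nr(cpus));

		/* Replaces open-coded loops over cpus->map[i]. */
		perf_cpu_map__for_each_cpu(cpu, idx, cpus)
			printf("map index %d -> cpu %d\n", idx, cpu.cpu);

		perf_cpu_map__put(cpus);	/* maps are reference counted */
		return 0;
	}

Keeping call sites on these accessors means later changes to the internal representation of perf_cpu_map only touch libperf, which is the stated point of the patch.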
Parent: 1d1d9af254
Commit: 4402869939
@@ -141,7 +141,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
 	}
 
 	if (evsel->fd == NULL &&
-	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
 		return -ENOMEM;
 
 	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
@@ -384,7 +384,7 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
 {
 	int err = 0, i;
 
-	for (i = 0; i < evsel->cpus->nr && !err; i++)
+	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
 		err = perf_evsel__run_ioctl(evsel,
 					    PERF_EVENT_IOC_SET_FILTER,
 					    (void *)filter, i);
@@ -333,7 +333,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 
 	/* default to the number of CPUs */
 	if (!nthreads)
-		nthreads = cpu->nr;
+		nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(nthreads, sizeof(*worker));
 	if (!worker)
@@ -452,7 +452,7 @@ int bench_epoll_wait(int argc, const char **argv)
 
 	/* default to the number of CPUs and leave one for the writer pthread */
 	if (!nthreads)
-		nthreads = cpu->nr - 1;
+		nthreads = perf_cpu_map__nr(cpu) - 1;
 
 	worker = calloc(nthreads, sizeof(*worker));
 	if (!worker) {
@@ -71,7 +71,7 @@ static int evlist__count_evsel_fds(struct evlist *evlist)
 	int cnt = 0;
 
 	evlist__for_each_entry(evlist, evsel)
-		cnt += evsel->core.threads->nr * evsel->core.cpus->nr;
+		cnt += evsel->core.threads->nr * perf_cpu_map__nr(evsel->core.cpus);
 
 	return cnt;
 }
@@ -151,7 +151,7 @@ static int bench_evlist_open_close__run(char *evstr)
 
 	init_stats(&time_stats);
 
-	printf("  Number of cpus:\t%d\n", evlist->core.cpus->nr);
+	printf("  Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
 	printf("  Number of threads:\t%d\n", evlist->core.threads->nr);
 	printf("  Number of events:\t%d (%d fds)\n",
 	       evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
@@ -150,7 +150,7 @@ int bench_futex_hash(int argc, const char **argv)
 	}
 
 	if (!params.nthreads) /* default to the number of CPUs */
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
@@ -173,7 +173,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
 	}
 
 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
@@ -175,7 +175,7 @@ int bench_futex_requeue(int argc, const char **argv)
 	}
 
 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
@@ -252,7 +252,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
 		err(EXIT_FAILURE, "calloc");
 
 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);
 
 	/* some sanity checks */
 	if (params.nwakes > params.nthreads ||
@@ -151,7 +151,7 @@ int bench_futex_wake(int argc, const char **argv)
 	}
 
 	if (!params.nthreads)
-		params.nthreads = cpu->nr;
+		params.nthreads = perf_cpu_map__nr(cpu);
 
 	worker = calloc(params.nthreads, sizeof(*worker));
 	if (!worker)
@@ -281,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
 	int ret;
 	int last_cpu;
 
-	last_cpu = perf_cpu_map__cpu(cpumap, cpumap->nr - 1).cpu;
+	last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
 	mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
 	mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
 
@@ -230,11 +230,12 @@ static bool cpus_map_matched(struct evsel *a, struct evsel *b)
 	if (!a->core.cpus || !b->core.cpus)
 		return false;
 
-	if (a->core.cpus->nr != b->core.cpus->nr)
+	if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
 		return false;
 
-	for (int i = 0; i < a->core.cpus->nr; i++) {
-		if (a->core.cpus->map[i].cpu != b->core.cpus->map[i].cpu)
+	for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
+		if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
+		    perf_cpu_map__cpu(b->core.cpus, i).cpu)
 			return false;
 	}
 
@@ -17,8 +17,8 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 	bm = bitmap_zalloc(nbits);
 
 	if (map && bm) {
-		for (i = 0; i < map->nr; i++)
-			set_bit(map->map[i].cpu, bm);
+		for (i = 0; i < perf_cpu_map__nr(map); i++)
+			set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
 	}
 
 	if (map)
@@ -75,10 +75,10 @@ static int process_event_cpus(struct perf_tool *tool __maybe_unused,
 
 	TEST_ASSERT_VAL("wrong id", ev->id == 123);
 	TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS);
-	TEST_ASSERT_VAL("wrong cpus", map->nr == 3);
-	TEST_ASSERT_VAL("wrong cpus", map->map[0].cpu == 1);
-	TEST_ASSERT_VAL("wrong cpus", map->map[1].cpu == 2);
-	TEST_ASSERT_VAL("wrong cpus", map->map[2].cpu == 3);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__nr(map) == 3);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
+	TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
 	perf_cpu_map__put(map);
 	return 0;
 }
@@ -25,14 +25,15 @@ static unsigned long *get_bitmap(const char *str, int nbits)
 {
 	struct perf_cpu_map *map = perf_cpu_map__new(str);
 	unsigned long *bm = NULL;
-	int i;
 
 	bm = bitmap_zalloc(nbits);
 
 	if (map && bm) {
-		for (i = 0; i < map->nr; i++) {
-			set_bit(map->map[i].cpu, bm);
-		}
+		struct perf_cpu cpu;
+		int i;
+
+		perf_cpu_map__for_each_cpu(cpu, i, map)
+			set_bit(cpu.cpu, bm);
 	}
 
 	if (map)
@@ -59,11 +59,12 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
 	}
 
 	CPU_ZERO(&cpu_set);
-	CPU_SET(cpus->map[0].cpu, &cpu_set);
+	CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
 	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
 	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
 		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
-			 cpus->map[0].cpu, str_error_r(errno, sbuf, sizeof(sbuf)));
+			 perf_cpu_map__cpu(cpus, 0).cpu,
+			 str_error_r(errno, sbuf, sizeof(sbuf)));
 		goto out_free_cpus;
 	}
 
@@ -122,44 +122,48 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 	}
 
 	// Test that CPU ID contains socket, die, core and CPU
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL);
-		TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match", map->map[i].cpu == id.cpu.cpu);
+		TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match",
+				perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu);
 
 		TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
 		TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);
 
 		TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Cpu map - Thread is set", id.thread == -1);
 	}
 
 	// Test that core ID contains socket, die and core
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Core map - Core ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].core_id == id.core);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
 
 		TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);
 
 		TEST_ASSERT_VAL("Core map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 		TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Core map - Thread is set", id.thread == -1);
 	}
 
 	// Test that die ID contains socket and die
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);
 
 		TEST_ASSERT_VAL("Die map - Die ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].die_id == id.die);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
 
 		TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
@@ -168,10 +172,11 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 	}
 
 	// Test that socket ID contains only socket
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
-			session->header.env.cpu[map->map[i].cpu].socket_id == id.socket);
+			session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+			id.socket);
 
 		TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
 		TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
@@ -181,10 +186,10 @@ static int check_cpu_topology(char *path, struct perf_cpu_map *map)
 	}
 
 	// Test that node ID contains only node
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL);
 		TEST_ASSERT_VAL("Node map - Node ID doesn't match",
-			cpu__get_node(map->map[i]) == id.node);
+			cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node);
 		TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1);
 		TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
 		TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	mp->idx = idx;
 
 	if (per_cpu) {
-		mp->cpu = evlist->core.cpus->map[idx];
+		mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
 		if (evlist->core.threads)
 			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
 		else
@@ -61,7 +61,7 @@ int evsel__alloc_counts(struct evsel *evsel)
 	struct perf_cpu_map *cpus = evsel__cpus(evsel);
 	int nthreads = perf_thread_map__nr(evsel->core.threads);
 
-	evsel->counts = perf_counts__new(cpus ? cpus->nr : 1, nthreads);
+	evsel->counts = perf_counts__new(perf_cpu_map__nr(cpus), nthreads);
 	return evsel->counts != NULL ? 0 : -ENOMEM;
 }
 
@@ -57,7 +57,7 @@ struct perf_cpu cpu__max_present_cpu(void);
  */
 static inline bool cpu_map__is_dummy(struct perf_cpu_map *cpus)
 {
-	return cpus->nr == 1 && cpus->map[0].cpu == -1;
+	return perf_cpu_map__nr(cpus) == 1 && perf_cpu_map__cpu(cpus, 0).cpu == -1;
 }
 
 /**
@@ -325,7 +325,7 @@ struct numa_topology *numa_topology__new(void)
 	if (!node_map)
 		goto out;
 
-	nr = (u32) node_map->nr;
+	nr = (u32) perf_cpu_map__nr(node_map);
 
 	tp = zalloc(sizeof(*tp) + sizeof(tp->nodes[0])*nr);
 	if (!tp)
@@ -334,7 +334,7 @@ struct numa_topology *numa_topology__new(void)
 	tp->nr = nr;
 
 	for (i = 0; i < nr; i++) {
-		if (load_numa_node(&tp->nodes[i], node_map->map[i].cpu)) {
+		if (load_numa_node(&tp->nodes[i], perf_cpu_map__cpu(node_map, i).cpu)) {
 			numa_topology__delete(tp);
 			tp = NULL;
 			break;
@@ -124,22 +124,23 @@ int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
 
 		events_nr++;
 
-		if (matched_cpus->nr > 0 && (unmatched_cpus->nr > 0 ||
-		    matched_cpus->nr < cpus->nr ||
-		    matched_cpus->nr < pmu->cpus->nr)) {
+		if (perf_cpu_map__nr(matched_cpus) > 0 &&
+		    (perf_cpu_map__nr(unmatched_cpus) > 0 ||
+		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(cpus) ||
+		     perf_cpu_map__nr(matched_cpus) < perf_cpu_map__nr(pmu->cpus))) {
 			perf_cpu_map__put(evsel->core.cpus);
 			perf_cpu_map__put(evsel->core.own_cpus);
 			evsel->core.cpus = perf_cpu_map__get(matched_cpus);
 			evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);
 
-			if (unmatched_cpus->nr > 0) {
+			if (perf_cpu_map__nr(unmatched_cpus) > 0) {
 				cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
 				pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
 					   buf1, pmu->name, evsel->name);
 			}
 		}
 
-		if (matched_cpus->nr == 0) {
+		if (perf_cpu_map__nr(matched_cpus) == 0) {
 			evlist__remove(evlist, evsel);
 			evsel__delete(evsel);
 
@@ -1782,7 +1782,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
 		nthreads = threads->nr;
 
 	if (evsel->core.fd == NULL &&
-	    perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
+	    perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
 		return -ENOMEM;
 
 	evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
@@ -2020,9 +2020,10 @@ retry_open:
 			test_attr__ready();
 
 			pr_debug2_peo("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx",
-				pid, cpus->map[idx].cpu, group_fd, evsel->open_flags);
+				pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
 
-			fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[idx].cpu,
+			fd = sys_perf_event_open(&evsel->core.attr, pid,
+						 perf_cpu_map__cpu(cpus, idx).cpu,
 						 group_fd, evsel->open_flags);
 
 			FD(evsel, idx, thread) = fd;
@@ -2038,7 +2039,8 @@ retry_open:
 			bpf_counter__install_pe(evsel, idx, fd);
 
 			if (unlikely(test_attr__enabled)) {
-				test_attr__open(&evsel->core.attr, pid, cpus->map[idx],
+				test_attr__open(&evsel->core.attr, pid,
+						perf_cpu_map__cpu(cpus, idx),
 						fd, group_fd, evsel->open_flags);
 			}
 
@@ -2079,7 +2081,8 @@ try_fallback:
 	if (evsel__precise_ip_fallback(evsel))
 		goto retry_open;
 
-	if (evsel__ignore_missing_thread(evsel, cpus->nr, idx, threads, thread, err)) {
+	if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
+					 idx, threads, thread, err)) {
 		/* We just removed 1 thread, so lower the upper nthreads limit. */
 		nthreads--;
 
@@ -2119,7 +2122,7 @@ out_close:
 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
 		struct perf_thread_map *threads)
 {
-	return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+	return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
 }
 
 void evsel__close(struct evsel *evsel)
@@ -2131,8 +2134,7 @@ void evsel__close(struct evsel *evsel)
 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
 {
 	if (cpu_map_idx == -1)
-		return evsel__open_cpu(evsel, cpus, NULL, 0,
-				       cpus ? cpus->nr : 1);
+		return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
 
 	return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
 }
@@ -2982,7 +2984,7 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
 	struct perf_cpu_map *cpus = evsel->core.cpus;
 	struct perf_thread_map *threads = evsel->core.threads;
 
-	if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
+	if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
 		return -ENOMEM;
 
 	return store_evsel_ids(evsel, evlist);
@@ -11,6 +11,7 @@
 #include <perf/evsel.h>
 #include "symbol_conf.h"
 #include <internal/cpumap.h>
+#include <perf/cpumap.h>
 
 struct bpf_object;
 struct cgroup;
@@ -191,7 +192,7 @@ static inline struct perf_cpu_map *evsel__cpus(struct evsel *evsel)
 
 static inline int evsel__nr_cpus(struct evsel *evsel)
 {
-	return evsel__cpus(evsel)->nr;
+	return perf_cpu_map__nr(evsel__cpus(evsel));
 }
 
 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
@@ -250,7 +250,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 
 	nr_cpus = perf_cpu_map__nr(cpu_map);
 	for (idx = 0; idx < nr_cpus; idx++) {
-		cpu = cpu_map->map[idx]; /* map c index to online cpu index */
+		cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
 		if (cpu__get_node(cpu) == node)
 			set_bit(cpu.cpu, mask->bits);
 	}
@@ -67,7 +67,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
 	cpus = perf_cpu_map__new(NULL);
 	if (!cpus)
 		return false;
-	cpu = cpus->map[0];
+	cpu = perf_cpu_map__cpu(cpus, 0);
 	perf_cpu_map__put(cpus);
 
 	do {
@@ -144,7 +144,7 @@ bool perf_can_record_cpu_wide(void)
 	if (!cpus)
 		return false;
 
-	cpu = cpus->map[0];
+	cpu = perf_cpu_map__cpu(cpus, 0);
 	perf_cpu_map__put(cpus);
 
 	fd = sys_perf_event_open(&attr, -1, cpu.cpu, -1, 0);
@@ -106,7 +106,7 @@ void evlist__config(struct evlist *evlist, struct record_opts *opts, struct call
 	if (opts->group)
 		evlist__set_leader(evlist);
 
-	if (evlist->core.cpus->map[0].cpu < 0)
+	if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
 		opts->no_inherit = true;
 
 	use_comm_exec = perf_can_comm_exec();
@@ -248,11 +248,11 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
 		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
 
 		if (cpus)
-			cpu = cpus->map[0];
+			cpu = perf_cpu_map__cpu(cpus, 0);
 
 		perf_cpu_map__put(cpus);
 	} else {
-		cpu = evlist->core.cpus->map[0];
+		cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
 	}
 
 	while (1) {
@@ -1607,8 +1607,8 @@ static void python_process_stat(struct perf_stat_config *config,
 	}
 
 	for (thread = 0; thread < threads->nr; thread++) {
-		for (cpu = 0; cpu < cpus->nr; cpu++) {
-			process_stat(counter, cpus->map[cpu],
+		for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+			process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
 				     perf_thread_map__pid(threads, thread), tstamp,
 				     perf_counts(counter->counts, cpu, thread));
 		}
@@ -2537,8 +2537,8 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 		return -1;
 	}
 
-	for (i = 0; i < map->nr; i++) {
-		struct perf_cpu cpu = map->map[i];
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
+		struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
 
 		if (cpu.cpu >= nr_cpus) {
 			pr_err("Requested CPU %d too large. "
@@ -734,8 +734,8 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
 	if (!m)
 		return -1;
 
-	for (i = 0; i < m->nr; i++) {
-		c = m->map[i];
+	for (i = 0; i < perf_cpu_map__nr(m); i++) {
+		c = perf_cpu_map__cpu(m, i);
 		if (c.cpu >= nr_cpus) {
 			ret = -1;
 			break;
@@ -1186,12 +1186,12 @@ int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 static void synthesize_cpus(struct cpu_map_entries *cpus,
 			    struct perf_cpu_map *map)
 {
-	int i;
+	int i, map_nr = perf_cpu_map__nr(map);
 
-	cpus->nr = map->nr;
+	cpus->nr = map_nr;
 
-	for (i = 0; i < map->nr; i++)
-		cpus->cpu[i] = map->map[i].cpu;
+	for (i = 0; i < map_nr; i++)
+		cpus->cpu[i] = perf_cpu_map__cpu(map, i).cpu;
 }
 
 static void synthesize_mask(struct perf_record_record_cpu_map *mask,
@@ -1202,13 +1202,13 @@ static void synthesize_mask(struct perf_record_record_cpu_map *mask,
 	mask->nr = BITS_TO_LONGS(max);
 	mask->long_size = sizeof(long);
 
-	for (i = 0; i < map->nr; i++)
-		set_bit(map->map[i].cpu, mask->mask);
+	for (i = 0; i < perf_cpu_map__nr(map); i++)
+		set_bit(perf_cpu_map__cpu(map, i).cpu, mask->mask);
 }
 
 static size_t cpus_size(struct perf_cpu_map *map)
 {
-	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
+	return sizeof(struct cpu_map_entries) + perf_cpu_map__nr(map) * sizeof(u16);
 }
 
 static size_t mask_size(struct perf_cpu_map *map, int *max)
@@ -1217,9 +1217,9 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
 
 	*max = 0;
 
-	for (i = 0; i < map->nr; i++) {
+	for (i = 0; i < perf_cpu_map__nr(map); i++) {
 		/* bit position of the cpu is + 1 */
-		int bit = map->map[i].cpu + 1;
+		int bit = perf_cpu_map__cpu(map, i).cpu + 1;
 
 		if (bit > *max)
 			*max = bit;
@@ -95,15 +95,15 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
 
 	if (target->cpu_list)
 		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
-				top->evlist->core.cpus->nr > 1 ? "s" : "",
+				perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
 				target->cpu_list);
 	else {
 		if (target->tid)
 			ret += SNPRINTF(bf + ret, size - ret, ")");
 		else
 			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
-					top->evlist->core.cpus->nr,
-					top->evlist->core.cpus->nr > 1 ? "s" : "");
+					perf_cpu_map__nr(top->evlist->core.cpus),
+					perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
 	}
 
 	perf_top__reset_sample_counters(top);