perf lock: Print the number of lost entries for BPF
Like the normal 'perf lock contention' output, it will print the number
of lost entries for BPF if any exist or if the -v option is passed.
Currently it uses the BROKEN_CONTENDED stat for the lost count (due to
full stack maps).

  $ sudo perf lock con -a -b --map-nr-entries 128 sleep 5
  ...
  === output for debug===

  bad: 43, total: 14903
  bad rate: 0.29 %
  histogram of events caused bad sequence
     acquire: 0
    acquired: 0
   contended: 43
     release: 0

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Blake Jones <blakejones@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220802191004.347740-3-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Parent: ceb13bfc01
Commit: 6d499a6b3d

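For reference, the summary figures in the debug output above are related in a
simple way: the bad rate is bad / total expressed as a percentage, and the
histogram attributes the bad entries to the event type that caused them (here
all 43 come from contended events whose callstack could not be stored). Below
is a minimal standalone sketch, not taken from the perf sources and using
illustrative names, that recomputes that summary:

	#include <stdio.h>

	/* Recompute the summary shown in the example run above (illustrative only). */
	int main(void)
	{
		unsigned long bad = 43, total = 14903;    /* values from the example run */
		unsigned long hist[4] = { 0, 0, 43, 0 };  /* acquire, acquired, contended, release */
		const char *name[4] = { "acquire", "acquired", "contended", "release" };

		printf("bad: %lu, total: %lu\n", bad, total);
		printf("bad rate: %.2f %%\n", 100.0 * bad / total);
		printf("histogram of events caused bad sequence\n");
		for (int i = 0; i < 4; i++)
			printf("%12s: %lu\n", name[i], hist[i]);
		return 0;
	}
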
@@ -1472,8 +1472,11 @@ static void print_contention_result(void)
 	pr_info(" %10s %s\n\n", "type", "caller");
 
 	bad = total = 0;
+	if (use_bpf)
+		bad = bad_hist[BROKEN_CONTENDED];
+
 	while ((st = pop_from_result())) {
-		total++;
+		total += use_bpf ? st->nr_contended : 1;
 		if (st->broken)
 			bad++;

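The counting change above reflects that, in BPF mode, pop_from_result() yields
one aggregated entry per contended callstack rather than one record per event,
so the total has to sum each entry's nr_contended field. A rough standalone
illustration of the two counting modes, using a made-up stats array instead of
perf's struct lock_stat:

	#include <stdio.h>
	#include <stdbool.h>

	/* Hypothetical, simplified stand-in for an aggregated contention entry. */
	struct stat_entry {
		unsigned int nr_contended;  /* events folded into this entry */
		bool broken;                /* entry flagged as broken */
	};

	int main(void)
	{
		struct stat_entry stats[] = {
			{ .nr_contended = 100, .broken = false },
			{ .nr_contended = 250, .broken = false },
			{ .nr_contended = 7,   .broken = true  },
		};
		unsigned long per_entry = 0, per_event = 0, bad = 0;

		for (unsigned int i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
			per_entry++;                        /* old counting: one per entry */
			per_event += stats[i].nr_contended; /* BPF counting: sum of events */
			if (stats[i].broken)
				bad++;
		}

		printf("entries: %lu, events: %lu, bad: %lu\n", per_entry, per_event, bad);
		return 0;
	}
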
|
@@ -1687,6 +1690,9 @@ static int __cmd_contention(int argc, const char **argv)
 		lock_contention_stop();
 		lock_contention_read(&con);
+
+		/* abuse bad hist stats for lost entries */
+		bad_hist[BROKEN_CONTENDED] = con.lost;
 	} else {
 		err = perf_session__process_events(session);
 		if (err)

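The "/* abuse bad hist stats for lost entries */" hook stores the BPF lost
count in the BROKEN_CONTENDED slot so the existing bad-sequence histogram
(shown in the commit message) reports it under 'contended' without any new
output code. A minimal sketch of that reuse; the enum and printer below only
mirror the idea and are not perf's actual definitions:

	#include <stdio.h>

	/* Hypothetical slots mirroring the idea of perf's broken-event histogram. */
	enum { BROKEN_ACQUIRE, BROKEN_ACQUIRED, BROKEN_CONTENDED, BROKEN_RELEASE, BROKEN_MAX };

	static unsigned long bad_hist[BROKEN_MAX];
	static const char *names[BROKEN_MAX] = { "acquire", "acquired", "contended", "release" };

	int main(void)
	{
		unsigned long lost_from_bpf = 43;  /* e.g. the stack map was full 43 times */

		/* Reuse the existing slot instead of adding a dedicated "lost" report. */
		bad_hist[BROKEN_CONTENDED] = lost_from_bpf;

		printf("histogram of events caused bad sequence\n");
		for (int i = 0; i < BROKEN_MAX; i++)
			printf("%12s: %lu\n", names[i], bad_hist[i]);
		return 0;
	}
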
@@ -16,7 +16,7 @@ static struct lock_contention_bpf *skel;
 /* should be same as bpf_skel/lock_contention.bpf.c */
 struct lock_contention_key {
-	u32 stack_id;
+	s32 stack_id;
 };
 
 struct lock_contention_data {

@@ -110,7 +110,7 @@ int lock_contention_stop(void)
 int lock_contention_read(struct lock_contention *con)
 {
 	int fd, stack;
-	u32 prev_key, key;
+	s32 prev_key, key;
 	struct lock_contention_data data;
 	struct lock_stat *st;
 	struct machine *machine = con->machine;

@@ -119,6 +119,8 @@ int lock_contention_read(struct lock_contention *con)
 	fd = bpf_map__fd(skel->maps.lock_stat);
 	stack = bpf_map__fd(skel->maps.stacks);
 
+	con->lost = skel->bss->lost;
+
 	prev_key = 0;
 	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
 		struct map *kmap;

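Here con->lost is read straight out of the skeleton's BSS, i.e. the global
'lost' variable that the BPF program increments. The snippet below models that
handoff with plain structs; the skeleton type and its bss layout are mocked up
here, the real ones are generated by libbpf from lock_contention.bpf.c:

	#include <stdio.h>

	struct bss_data { unsigned long lost; };    /* mock of the BPF program's globals */

	/* Mock of the libbpf-generated skeleton: only the bits used here. */
	struct lock_contention_bpf_mock {
		struct bss_data  bss_storage;
		struct bss_data *bss;
	};

	/* Simplified stand-in for perf's struct lock_contention. */
	struct lock_contention_mock {
		unsigned long lost;
	};

	int main(void)
	{
		struct lock_contention_bpf_mock skel = { .bss_storage = { .lost = 43 } };
		struct lock_contention_mock con = { 0 };

		skel.bss = &skel.bss_storage;   /* libbpf points ->bss at the mapped BSS */

		/* Equivalent of: con->lost = skel->bss->lost; */
		con.lost = skel.bss->lost;

		printf("lost entries reported to userspace: %lu\n", con.lost);
		return 0;
	}
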
@@ -12,7 +12,7 @@
 #define MAX_ENTRIES 10240
 
 struct contention_key {
-	__u32 stack_id;
+	__s32 stack_id;
 };
 
 struct contention_data {

@@ -27,7 +27,7 @@ struct tstamp_data {
 	__u64 timestamp;
 	__u64 lock;
 	__u32 flags;
-	__u32 stack_id;
+	__s32 stack_id;
 };
 
 /* callstack storage */

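The u32 -> s32 (and __u32 -> __s32) changes in this and the surrounding hunks
all exist for the same reason: bpf_get_stackid() returns a negative error code
when it cannot store a callstack (for instance when the stack map is full),
and an unsigned field silently turns that into a huge positive id, so a '< 0'
check can never fire. A small standalone demonstration of the pitfall, with no
BPF involved, just the signedness:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int ret = -17;                 /* e.g. a negative error from a stack-id helper */
		uint32_t unsigned_id = ret;
		int32_t  signed_id   = ret;

		/* The unsigned copy wraps around and looks like a valid id.
		 * (Compilers typically warn that this comparison is always false.)
		 */
		printf("unsigned: %u, (unsigned_id < 0) is %d\n", unsigned_id, unsigned_id < 0);
		/* Only the signed copy keeps the error detectable. */
		printf("signed:   %d, (signed_id < 0) is %d\n", signed_id, signed_id < 0);
		return 0;
	}
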
@@ -73,6 +73,9 @@ int enabled;
 int has_cpu;
 int has_task;
 
+/* error stat */
+unsigned long lost;
+
 static inline int can_record(void)
 {
 	if (has_cpu) {

@@ -116,6 +119,8 @@ int contention_begin(u64 *ctx)
 	pelem->flags = (__u32)ctx[1];
 	pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
 
+	if (pelem->stack_id < 0)
+		lost++;
 	return 0;
 }
 

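With stack_id now signed, contention_begin() can detect a failed
bpf_get_stackid() call and bump the global lost counter that userspace later
reads through the skeleton. A plain-C model of that control flow;
bpf_get_stackid() is replaced here by a stub that fails once the pretend stack
map is full:

	#include <stdio.h>

	#define FAKE_MAP_ENTRIES 2

	static unsigned long lost;          /* mirrors the BPF program's global counter */
	static int used_slots;

	/* Stub standing in for bpf_get_stackid(): fails when the "map" is full. */
	static int fake_get_stackid(void)
	{
		if (used_slots >= FAKE_MAP_ENTRIES)
			return -1;               /* the real helper returns a negative errno */
		return used_slots++;
	}

	/* Model of contention_begin(): record a stack id, count failures as lost. */
	static void contention_begin_model(void)
	{
		int stack_id = fake_get_stackid();

		if (stack_id < 0)
			lost++;
	}

	int main(void)
	{
		for (int i = 0; i < 5; i++)      /* 5 contention events, room for only 2 */
			contention_begin_model();

		printf("lost: %lu\n", lost);     /* prints 3 */
		return 0;
	}
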
@@ -113,6 +113,7 @@ struct lock_contention {
 	struct machine *machine;
 	struct hlist_head *result;
 	unsigned long map_nr_entries;
+	unsigned long lost;
 };
 
 #ifdef HAVE_BPF_SKEL