perf record: Record dropped sample count
When BPF filters are used, the event can drop some samples. It would be
nice if it reported how many samples it lost. As the LOST_SAMPLES event
can carry similar information, use it for BPF filters. To indicate that
the loss came from a BPF filter, add a new misc flag and do not display
the CPU load warnings in that case.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20230314234237.3008956-2-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Parent: d180aa56b5
Commit: 27c6f2455b
tools/lib/perf/include/perf/event.h

@@ -70,6 +70,8 @@ struct perf_record_lost {
     __u64                    lost;
 };
 
+#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)
+
 struct perf_record_lost_samples {
     struct perf_event_header header;
     __u64                    lost;
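The new misc bit lets any consumer of the event stream tell samples that a BPF filter dropped on purpose apart from samples lost in the kernel ring buffer, which is what the session.c change at the end of this diff does before updating the lost-samples statistics. A minimal consumer-side sketch of that check (the helper name is illustrative and not part of this commit; the define mirrors the tool-side one added above):

    #include <linux/perf_event.h>   /* struct perf_event_header, PERF_RECORD_LOST_SAMPLES */

    /* Tool-side flag added above; it is not a kernel UAPI define. */
    #define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)

    /* Returns non-zero when a LOST_SAMPLES record was synthesized for samples
     * dropped by a BPF filter rather than lost in the kernel ring buffer. */
    static int lost_samples_from_bpf(const struct perf_event_header *hdr)
    {
        return hdr->type == PERF_RECORD_LOST_SAMPLES &&
               (hdr->misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF);
    }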
tools/perf/builtin-record.c

@@ -52,6 +52,7 @@
 #include "util/pmu-hybrid.h"
 #include "util/evlist-hybrid.h"
 #include "util/off_cpu.h"
+#include "util/bpf-filter.h"
 #include "asm/bug.h"
 #include "perf.h"
 #include "cputopo.h"
@@ -1856,24 +1857,16 @@ record__switch_output(struct record *rec, bool at_exit)
     return fd;
 }
 
-static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
+static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
                                         struct perf_record_lost_samples *lost,
-                                        int cpu_idx, int thread_idx)
+                                        int cpu_idx, int thread_idx, u64 lost_count,
+                                        u16 misc_flag)
 {
-    struct perf_counts_values count;
     struct perf_sample_id *sid;
     struct perf_sample sample = {};
     int id_hdr_size;
 
-    if (perf_evsel__read(&evsel->core, cpu_idx, thread_idx, &count) < 0) {
-        pr_debug("read LOST count failed\n");
-        return;
-    }
-
-    if (count.lost == 0)
-        return;
-
-    lost->lost = count.lost;
+    lost->lost = lost_count;
     if (evsel->core.ids) {
         sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
         sample.id = sid->id;
@@ -1882,6 +1875,7 @@ static void __record__read_lost_samples(struct record *rec, struct evsel *evsel,
     id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
                                                    evsel->core.attr.sample_type, &sample);
     lost->header.size = sizeof(*lost) + id_hdr_size;
+    lost->header.misc = misc_flag;
     record__write(rec, NULL, lost, lost->header.size);
 }
 
@@ -1905,6 +1899,7 @@ static void record__read_lost_samples(struct record *rec)
 
     evlist__for_each_entry(session->evlist, evsel) {
         struct xyarray *xy = evsel->core.sample_id;
+        u64 lost_count;
 
         if (xy == NULL || evsel->core.fd == NULL)
             continue;
@@ -1916,12 +1911,27 @@ static void record__read_lost_samples(struct record *rec)
 
         for (int x = 0; x < xyarray__max_x(xy); x++) {
             for (int y = 0; y < xyarray__max_y(xy); y++) {
-                __record__read_lost_samples(rec, evsel, lost, x, y);
-            }
-        }
-    }
-    free(lost);
+                struct perf_counts_values count;
+
+                if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
+                    pr_debug("read LOST count failed\n");
+                    goto out;
+                }
+
+                if (count.lost) {
+                    __record__save_lost_samples(rec, evsel, lost,
+                                                x, y, count.lost, 0);
+                }
+            }
+        }
+
+        lost_count = perf_bpf_filter__lost_count(evsel);
+        if (lost_count)
+            __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
+                                        PERF_RECORD_MISC_LOST_SAMPLES_BPF);
+    }
+out:
+    free(lost);
 }
 
 static volatile sig_atomic_t workload_exec_errno;
tools/perf/util/bpf-filter.c

@@ -69,6 +69,13 @@ int perf_bpf_filter__destroy(struct evsel *evsel)
     return 0;
 }
 
+u64 perf_bpf_filter__lost_count(struct evsel *evsel)
+{
+    struct sample_filter_bpf *skel = evsel->bpf_skel;
+
+    return skel ? skel->bss->dropped : 0;
+}
+
 struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flags,
                                                        enum perf_bpf_filter_op op,
                                                        unsigned long val)
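perf_bpf_filter__lost_count() only reads the counter; the counting happens inside the filter BPF program, which increments a global 'dropped' variable each time it rejects a sample, and that global is what the skeleton exposes as skel->bss->dropped above. A rough sketch of that side, where every name except 'dropped' is illustrative and not the actual sample_filter skeleton source:

    /* Sketch only: a trimmed-down stand-in for perf's sample filter program. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    /* Global counter in .bss; userspace reads it as skel->bss->dropped. */
    __u64 dropped;

    /* Placeholder for the real filter evaluation, which walks the
     * user-supplied filter expressions. */
    static __always_inline int sample_passes_filter(void *ctx)
    {
        (void)ctx;      /* the real code inspects the sample data here */
        return 1;
    }

    SEC("perf_event")
    int hypothetical_sample_filter(void *ctx)
    {
        if (!sample_passes_filter(ctx)) {
            __sync_fetch_and_add(&dropped, 1);
            return 0;   /* returning 0 makes the kernel drop this sample */
        }
        return 1;       /* non-zero keeps the sample */
    }

    char LICENSE[] SEC("license") = "Dual BSD/GPL";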
tools/perf/util/bpf-filter.h

@@ -22,6 +22,7 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(unsigned long sample_flag
 int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
 int perf_bpf_filter__prepare(struct evsel *evsel);
 int perf_bpf_filter__destroy(struct evsel *evsel);
+u64 perf_bpf_filter__lost_count(struct evsel *evsel);
 
 #else /* !HAVE_BPF_SKEL */
 
@@ -38,5 +39,9 @@ static inline int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused)
 {
     return -EOPNOTSUPP;
 }
+static inline u64 perf_bpf_filter__lost_count(struct evsel *evsel __maybe_unused)
+{
+    return 0;
+}
 #endif /* HAVE_BPF_SKEL*/
 #endif /* PERF_UTIL_BPF_FILTER_H */
tools/perf/util/session.c

@@ -1582,7 +1582,8 @@ static int machines__deliver_event(struct machines *machines,
         evlist->stats.total_lost += event->lost.lost;
         return tool->lost(tool, event, sample, machine);
     case PERF_RECORD_LOST_SAMPLES:
-        if (tool->lost_samples == perf_event__process_lost_samples)
+        if (tool->lost_samples == perf_event__process_lost_samples &&
+            !(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
             evlist->stats.total_lost_samples += event->lost_samples.lost;
         return tool->lost_samples(tool, event, sample, machine);
     case PERF_RECORD_READ: