perf/bpf: Extend the perf_event_read_local() interface, a.k.a. "bpf: perf event change needed for subsequent bpf helpers"
eBPF programs would like access to the (perf) event enabled and running
times along with the event value, such that they can deal with event
multiplexing (among other things).

This patch extends the interface; a future eBPF patch will utilize
the new functionality.

[ Note, there's a same-content commit with a poor changelog and a
  meaningless title in the networking tree as well - but we need this
  change for subsequent perf work, so apply it here as well, with a
  proper changelog. Hopefully Git will be able to sort out this somewhat
  messy workflow, if there are no other, conflicting changes to these
  files. ]

Signed-off-by: Yonghong Song <yhs@fb.com>
[ Rewrote the changelog. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <ast@fb.com>
Cc: <daniel@iogearbox.net>
Cc: <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David S. Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20171005161923.332790-2-yhs@fb.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Parent: 6856b8e536
Commit: 7d9285e82d
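When more events are scheduled than the PMU has hardware counters, perf time-multiplexes them, so the raw count only covers the time the event was actually on the PMU. Tooling conventionally extrapolates by scaling the count with the enabled/running ratio; a minimal sketch of that calculation (the scale_count() helper is hypothetical and illustrative, not part of this commit):

	#include <linux/types.h>
	#include <linux/math64.h>

	/* Extrapolate the full-period count of a multiplexed event. */
	static u64 scale_count(u64 value, u64 enabled, u64 running)
	{
		if (!running)		/* the event never got PMU time */
			return 0;
		/* value * (enabled / running); real code must also mind
		 * u64 overflow in the multiplication */
		return div64_u64(value * enabled, running);
	}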
include/linux/perf_event.h:

@@ -806,6 +806,7 @@ struct perf_output_handle {
 struct bpf_perf_event_data_kern {
 	struct pt_regs *regs;
 	struct perf_sample_data *data;
+	struct perf_event *event;
 };
 
 #ifdef CONFIG_CGROUP_PERF
@@ -884,7 +885,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
-int perf_event_read_local(struct perf_event *event, u64 *value);
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
@@ -1286,7 +1288,8 @@ static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 {
 	return ERR_PTR(-EINVAL);
 }
-static inline int perf_event_read_local(struct perf_event *event, u64 *value)
+static inline int perf_event_read_local(struct perf_event *event, u64 *value,
+					u64 *enabled, u64 *running)
 {
 	return -EINVAL;
 }
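Both new out-parameters are optional: callers that only want the counter value pass NULL, as the two call-site updates below do. A minimal sketch of the two calling styles (illustrative only, not lines from this commit):

	u64 value, enabled, running;
	int err;

	/* Counter value plus enabled/running times, e.g. for scaling: */
	err = perf_event_read_local(event, &value, &enabled, &running);

	/* Value only -- the time arguments may both be NULL: */
	err = perf_event_read_local(event, &value, NULL, NULL);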
kernel/bpf/arraymap.c:

@@ -492,7 +492,7 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
 
 	ee = ERR_PTR(-EOPNOTSUPP);
 	event = perf_file->private_data;
-	if (perf_event_read_local(event, &value) == -EOPNOTSUPP)
+	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
 		goto err_out;
 
 	ee = bpf_event_entry_gen(perf_file, map_file);
kernel/events/core.c:

@@ -3684,10 +3684,12 @@ static inline u64 perf_event_count(struct perf_event *event)
  *     will not be local and we cannot read them atomically
  *   - must not have a pmu::count method
  */
-int perf_event_read_local(struct perf_event *event, u64 *value)
+int perf_event_read_local(struct perf_event *event, u64 *value,
+			  u64 *enabled, u64 *running)
 {
 	unsigned long flags;
 	int ret = 0;
+	u64 now;
 
 	/*
 	 * Disabling interrupts avoids all counter scheduling (context
@@ -3718,13 +3720,21 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
 		goto out;
 	}
 
+	now = event->shadow_ctx_time + perf_clock();
+	if (enabled)
+		*enabled = now - event->tstamp_enabled;
 	/*
 	 * If the event is currently on this CPU, its either a per-task event,
 	 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
 	 * oncpu == -1).
 	 */
-	if (event->oncpu == smp_processor_id())
+	if (event->oncpu == smp_processor_id()) {
 		event->pmu->read(event);
+		if (running)
+			*running = now - event->tstamp_running;
+	} else if (running) {
+		*running = event->total_time_running;
+	}
 
 	*value = local64_read(&event->count);
 out:
@@ -8072,6 +8082,7 @@ static void bpf_overflow_handler(struct perf_event *event,
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
 		.regs = regs,
+		.event = event,
 	};
 	int ret = 0;
 
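The enabled/running pair computed above mirrors what userspace gets from read(2) on a perf fd opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING. A sketch of that userspace analogue (the read_counter() helper is hypothetical; assumes an already-opened fd and no PERF_FORMAT_ID):

	#include <stdint.h>
	#include <unistd.h>

	struct read_format {
		uint64_t value;		/* counter value */
		uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	};

	static int read_counter(int fd, struct read_format *rf)
	{
		return read(fd, rf, sizeof(*rf)) == (ssize_t)sizeof(*rf) ? 0 : -1;
	}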
kernel/trace/bpf_trace.c:

@@ -275,7 +275,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
 	if (!ee)
 		return -ENOENT;
 
-	err = perf_event_read_local(ee->event, &value);
+	err = perf_event_read_local(ee->event, &value, NULL, NULL);
 	/*
 	 * this api is ugly since we miss [-22..-2] range of valid
 	 * counter values, but that's uapi
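The "subsequent bpf helpers" from the title land in the follow-up patch, which uses the new .event pointer and the extended read interface to expose all three quantities to BPF programs. From the BPF side that eventually looks roughly like the sketch below (the helper and struct belong to the later commit, not this one):

	struct bpf_perf_event_value {
		__u64 counter;
		__u64 enabled;
		__u64 running;
	};

	/* In a BPF program, with `map` a BPF_MAP_TYPE_PERF_EVENT_ARRAY: */
	struct bpf_perf_event_value buf = {};
	__u64 total = 0;
	int err = bpf_perf_event_read_value(&map, BPF_F_CURRENT_CPU,
					    &buf, sizeof(buf));
	if (!err && buf.running)	/* scale for multiplexing */
		total = buf.counter * buf.enabled / buf.running;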