From 346938e9380cc0b2ad8e2566389cdc570386fe22 Mon Sep 17 00:00:00 2001
From: Song Liu <songliubraving@fb.com>
Date: Thu, 23 Jul 2020 11:06:48 -0700
Subject: [PATCH] selftests/bpf: Add get_stackid_cannot_attach

This test confirms that a BPF program that calls bpf_get_stackid()
cannot attach to a perf_event with precise_ip > 0 but without
PERF_SAMPLE_CALLCHAIN, and cannot attach if the perf_event has
exclude_callchain_kernel set.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200723180648.1429892-6-songliubraving@fb.com
---
 .../prog_tests/get_stackid_cannot_attach.c | 91 +++++++++++++++++++
 1 file changed, 91 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c

diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
new file mode 100644
index 000000000000..d884b2ed5bc5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include <test_progs.h>
+#include "test_stacktrace_build_id.skel.h"
+
+void test_get_stackid_cannot_attach(void)
+{
+	struct perf_event_attr attr = {
+		/* .type = PERF_TYPE_SOFTWARE, */
+		.type = PERF_TYPE_HARDWARE,
+		.config = PERF_COUNT_HW_CPU_CYCLES,
+		.precise_ip = 1,
+		.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK,
+		.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
+			PERF_SAMPLE_BRANCH_NO_FLAGS |
+			PERF_SAMPLE_BRANCH_NO_CYCLES |
+			PERF_SAMPLE_BRANCH_CALL_STACK,
+		.sample_period = 5000,
+		.size = sizeof(struct perf_event_attr),
+	};
+	struct test_stacktrace_build_id *skel;
+	__u32 duration = 0;
+	int pmu_fd, err;
+
+	skel = test_stacktrace_build_id__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
+		return;
+
+	/* override program type */
+	bpf_program__set_perf_event(skel->progs.oncpu);
+
+	err = test_stacktrace_build_id__load(skel);
+	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
+		goto cleanup;
+
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+	if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) {
+		printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n",
+		       __func__);
+		test__skip();
+		goto cleanup;
+	}
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto cleanup;
+
+	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
+							   pmu_fd);
+	CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_no_callchain",
+	      "should have failed\n");
+	close(pmu_fd);
+
+	/* add PERF_SAMPLE_CALLCHAIN, attach should succeed */
+	attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto cleanup;
+
+	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
+							   pmu_fd);
+	CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event_callchain",
+	      "err: %ld\n", PTR_ERR(skel->links.oncpu));
+	close(pmu_fd);
+
+	/* add exclude_callchain_kernel, attach should fail */
+	attr.exclude_callchain_kernel = 1;
+
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto cleanup;
+
+	skel->links.oncpu =
+		bpf_program__attach_perf_event(skel->progs.oncpu, pmu_fd);
+	CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_exclude_callchain_kernel",
+	      "should have failed\n");
+	close(pmu_fd);
+
+cleanup:
+	test_stacktrace_build_id__destroy(skel);
+}
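
For readers without the skeleton handy: the oncpu program comes from the
shared progs/test_stacktrace_build_id.c source, where it is declared with a
tracepoint section; that is why the test overrides its type with
bpf_program__set_perf_event() before loading. The shape of program the
attach-time check applies to is a perf_event program that calls
bpf_get_stackid() into a BPF_MAP_TYPE_STACK_TRACE map. A minimal standalone
sketch of such a program (illustrative only, not the actual skeleton
source; the map size here is an arbitrary choice) looks like this:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch, not progs/test_stacktrace_build_id.c. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 128);		 /* arbitrary size */
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64)); /* PERF_MAX_STACK_DEPTH slots */
} stackmap SEC(".maps");

SEC("perf_event")
int oncpu(void *ctx)
{
	/* The presence of this call is what the attach-time check keys
	 * on: on a precise_ip event, the helper consumes the callchain
	 * sampled together with the event.
	 */
	bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";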
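
The three attach attempts encode one rule. Restated as a standalone C
predicate (purely illustrative: this helper exists neither in the kernel
nor in libbpf, it only rewrites the commit message in code):

#include <stdbool.h>
#include <linux/perf_event.h>

/* Illustrative restatement of the rule this selftest exercises. */
static bool get_stackid_can_attach(const struct perf_event_attr *attr)
{
	/* Without precise_ip, bpf_get_stackid() can unwind the stack at
	 * program run time, so no sampled callchain is needed.
	 */
	if (!attr->precise_ip)
		return true;
	/* With precise_ip > 0, the helper has to use the callchain
	 * sampled with the precise IP, so the event must sample it and
	 * must not exclude the kernel portion.
	 */
	return (attr->sample_type & PERF_SAMPLE_CALLCHAIN) &&
	       !attr->exclude_callchain_kernel;
}

In the test, the first open fails this predicate (no
PERF_SAMPLE_CALLCHAIN), the second satisfies it, and the third fails it
again via exclude_callchain_kernel, matching the expected attach outcomes.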