selftests/bpf: Convert some selftests to high-level BPF map APIs

Convert a bunch of selftests to use the newly added high-level BPF map
APIs.

This change exposed that the map_kptr selftests allocated too big a
buffer, which is fixed in this patch as well.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220512220713.2617964-2-andrii@kernel.org
Andrii Nakryiko 2022-05-12 15:07:13 -07:00, committed by Daniel Borkmann
Parent 737d0646a8
Commit b2531d4bdc
8 changed files with 61 additions and 47 deletions
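The conversion pattern repeated throughout the hunks below swaps the FD-based map calls for their struct bpf_map * counterparts, which take explicit key/value sizes that libbpf can validate. A minimal before/after sketch (the map and function names here are illustrative, not taken from any single test):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Illustrative only: 'map' stands in for any skeleton map, e.g.
 * skel->maps.results; assumes the high-level per-map APIs added in
 * the parent commit (737d0646a8).
 */
static int lookup_old_and_new(struct bpf_map *map, __u32 key)
{
	__u64 val;
	int err;

	/* old, low-level API: raw map FD, key/value sizes implicit */
	err = bpf_map_lookup_elem(bpf_map__fd(map), &key, &val);
	if (err)
		return err;

	/* new, high-level API: struct bpf_map * plus explicit sizes,
	 * validated against the map definition before the syscall
	 */
	return bpf_map__lookup_elem(map, &key, sizeof(key),
				    &val, sizeof(val), 0);
}

Passing the sizes explicitly turns silent buffer mismatches into immediate errors, which is also what surfaced the map_kptr buffer bug fixed below.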

tools/testing/selftests/bpf/prog_tests/core_autosize.c

@@ -167,7 +167,7 @@ void test_core_autosize(void)
 	if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
 		goto cleanup;
 
-	err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
+	err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
 	if (!ASSERT_OK(err, "bss_lookup"))
 		goto cleanup;

tools/testing/selftests/bpf/prog_tests/core_retro.c

@@ -6,31 +6,32 @@
 void test_core_retro(void)
 {
-	int err, zero = 0, res, duration = 0, my_pid = getpid();
+	int err, zero = 0, res, my_pid = getpid();
 	struct test_core_retro *skel;
 
 	/* load program */
 	skel = test_core_retro__open_and_load();
-	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
 		goto out_close;
 
-	err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
-	if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
+	err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
+				   &my_pid, sizeof(my_pid), 0);
+	if (!ASSERT_OK(err, "map_update"))
 		goto out_close;
 
 	/* attach probe */
 	err = test_core_retro__attach(skel);
-	if (CHECK(err, "attach_kprobe", "err %d\n", err))
+	if (!ASSERT_OK(err, "attach_kprobe"))
 		goto out_close;
 
 	/* trigger */
 	usleep(1);
 
-	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
-	if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
+	err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
+	if (!ASSERT_OK(err, "map_lookup"))
 		goto out_close;
 
-	CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
+	ASSERT_EQ(res, my_pid, "pid_check");
 
 out_close:
 	test_core_retro__destroy(skel);

tools/testing/selftests/bpf/prog_tests/for_each.c

@@ -10,9 +10,10 @@ static unsigned int duration;
 
 static void test_hash_map(void)
 {
-	int i, err, hashmap_fd, max_entries, percpu_map_fd;
+	int i, err, max_entries;
 	struct for_each_hash_map_elem *skel;
 	__u64 *percpu_valbuf = NULL;
+	size_t percpu_val_sz;
 	__u32 key, num_cpus;
 	__u64 val;
 	LIBBPF_OPTS(bpf_test_run_opts, topts,
@@ -25,26 +26,27 @@ static void test_hash_map(void)
 	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
 		return;
 
-	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
 	max_entries = bpf_map__max_entries(skel->maps.hashmap);
 	for (i = 0; i < max_entries; i++) {
 		key = i;
 		val = i + 1;
-		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
+		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
+					   &val, sizeof(val), BPF_ANY);
 		if (!ASSERT_OK(err, "map_update"))
 			goto out;
 	}
 
 	num_cpus = bpf_num_possible_cpus();
-	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
-	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+	percpu_val_sz = sizeof(__u64) * num_cpus;
+	percpu_valbuf = malloc(percpu_val_sz);
 	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
 		goto out;
 
 	key = 1;
 	for (i = 0; i < num_cpus; i++)
 		percpu_valbuf[i] = i + 1;
-	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+				   percpu_valbuf, percpu_val_sz, BPF_ANY);
 	if (!ASSERT_OK(err, "percpu_map_update"))
 		goto out;
@@ -58,7 +60,7 @@ static void test_hash_map(void)
 	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");
 
 	key = 1;
-	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
+	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
 	ASSERT_ERR(err, "hashmap_lookup");
 
 	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
@@ -75,9 +77,10 @@ out:
 static void test_array_map(void)
 {
 	__u32 key, num_cpus, max_entries;
-	int i, arraymap_fd, percpu_map_fd, err;
+	int i, err;
 	struct for_each_array_map_elem *skel;
 	__u64 *percpu_valbuf = NULL;
+	size_t percpu_val_sz;
 	__u64 val, expected_total;
 	LIBBPF_OPTS(bpf_test_run_opts, topts,
 		.data_in = &pkt_v4,
@@ -89,7 +92,6 @@ static void test_array_map(void)
 	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
 		return;
 
-	arraymap_fd = bpf_map__fd(skel->maps.arraymap);
 	expected_total = 0;
 	max_entries = bpf_map__max_entries(skel->maps.arraymap);
 	for (i = 0; i < max_entries; i++) {
@@ -98,21 +100,23 @@
 		/* skip the last iteration for expected total */
 		if (i != max_entries - 1)
 			expected_total += val;
-		err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
+		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
+					   &val, sizeof(val), BPF_ANY);
 		if (!ASSERT_OK(err, "map_update"))
 			goto out;
 	}
 
 	num_cpus = bpf_num_possible_cpus();
-	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
-	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
+	percpu_val_sz = sizeof(__u64) * num_cpus;
+	percpu_valbuf = malloc(percpu_val_sz);
 	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
 		goto out;
 
 	key = 0;
 	for (i = 0; i < num_cpus; i++)
 		percpu_valbuf[i] = i + 1;
-	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
+	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
+				   percpu_valbuf, percpu_val_sz, BPF_ANY);
 	if (!ASSERT_OK(err, "percpu_map_update"))
 		goto out;
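One detail in the per-CPU hunks above: with BPF_MAP_TYPE_PERCPU_* maps, user space exchanges one value per possible CPU, so the tests now compute percpu_val_sz explicitly and pass it as the value size. A sketch of the sizing rule, assuming an 8-byte value type and using libbpf's public libbpf_num_possible_cpus() in place of the selftest helper bpf_num_possible_cpus():

#include <errno.h>
#include <stdlib.h>
#include <bpf/libbpf.h>

/* Update one key of a per-CPU map with values 1..N across N possible CPUs. */
static int update_percpu_elem(struct bpf_map *map, __u32 key)
{
	int i, err, num_cpus = libbpf_num_possible_cpus();
	size_t val_sz;
	__u64 *vals;

	if (num_cpus < 0)
		return num_cpus;

	/* one value slot per possible CPU */
	val_sz = sizeof(__u64) * num_cpus;
	vals = malloc(val_sz);
	if (!vals)
		return -ENOMEM;
	for (i = 0; i < num_cpus; i++)
		vals[i] = i + 1;

	/* the total buffer size is what libbpf checks against
	 * value_size * number of possible CPUs
	 */
	err = bpf_map__update_elem(map, &key, sizeof(key), vals, val_sz, BPF_ANY);
	free(vals);
	return err;
}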

tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c

@@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void)
 
 	/* Lookup and delete element. */
 	key = 1;
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), &value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void)
 
 	/* Lookup and delete element. */
 	key = 1;
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void)
 		goto cleanup;
 
 	/* Lookup and delete element 3. */
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), &value, sizeof(value), 0);
 	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
@@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void)
 		value[i] = 0;
 
 	/* Lookup and delete element 3. */
-	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
-	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) {
+	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
+					      &key, sizeof(key), value, sizeof(value), 0);
+	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
 		goto cleanup;
-	}
 
 	/* Check if only one CPU has set the value. */
 	for (i = 0; i < nr_cpus; i++) {

tools/testing/selftests/bpf/prog_tests/map_kptr.c

@@ -91,7 +91,7 @@ static void test_map_kptr_success(bool test_run)
 	);
 	struct map_kptr *skel;
 	int key = 0, ret;
-	char buf[24];
+	char buf[16];
 
 	skel = map_kptr__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
@@ -107,24 +107,29 @@ static void test_map_kptr_success(bool test_run)
 	if (test_run)
 		return;
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "array_map update");
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "array_map update2");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "hash_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "hash_map delete");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "hash_malloc_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "hash_malloc_map delete");
 
-	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
+	ret = bpf_map__update_elem(skel->maps.lru_hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
 	ASSERT_OK(ret, "lru_hash_map update");
-	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
+	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
 	ASSERT_OK(ret, "lru_hash_map delete");
 
 	map_kptr__destroy(skel);
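The buf[24] -> buf[16] change above is the "too big buffer" fix called out in the commit message: the FD-based bpf_map_update_elem() copies exactly the map's value_size bytes and silently ignores any excess in the caller's buffer, whereas bpf_map__update_elem() compares the passed size against the map definition and fails on mismatch. A sketch, under the assumption that the map's value_size is 16 bytes (two 8-byte kptr fields, as in this selftest):

#include <bpf/libbpf.h>

/* Assumes 'map' has value_size == 16; the exact error code on a size
 * mismatch is libbpf's choice (typically -EINVAL).
 */
static void size_check_demo(struct bpf_map *map)
{
	char oversized[24] = {0}, exact[16] = {0};
	__u32 key = 0;
	int err;

	/* rejected by libbpf up front: 24 != value_size (16) */
	err = bpf_map__update_elem(map, &key, sizeof(key),
				   oversized, sizeof(oversized), 0);

	/* matching size: the update reaches the kernel */
	err = bpf_map__update_elem(map, &key, sizeof(key),
				   exact, sizeof(exact), 0);
	(void)err;
}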

tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c

@@ -8,7 +8,7 @@ void test_stacktrace_build_id(void)
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
 	struct test_stacktrace_build_id *skel;
 	int err, stack_trace_len;
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -58,7 +58,7 @@ retry:
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -79,8 +79,8 @@ retry:
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;

tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c

@@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void)
 		.type = PERF_TYPE_HARDWARE,
 		.config = PERF_COUNT_HW_CPU_CYCLES,
 	};
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -100,7 +100,7 @@ retry:
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -108,7 +108,8 @@ retry:
 	do {
 		char build_id[64];
 
-		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
+					   id_offs, sizeof(id_offs), 0);
 		if (CHECK(err, "lookup_elem from stackmap",
 			  "err %d, errno %d\n", err, errno))
 			goto cleanup;
@@ -121,8 +122,8 @@ retry:
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
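Both stacktrace tests convert the same key-iteration idiom, hence the renamed prev_key and the extra sizeof(key) argument in two places. The walk itself is unchanged: seed with a NULL current key to get the first key, then feed each key back until no next key remains. A minimal sketch of the idiom with the high-level API:

#include <bpf/libbpf.h>

/* Visit every key of 'map'; keys are assumed to be __u32 here. */
static void walk_map_keys(struct bpf_map *map)
{
	__u32 key, prev_key;
	int err;

	/* a NULL current key requests the first key in the map */
	err = bpf_map__get_next_key(map, NULL, &key, sizeof(key));
	while (!err) {
		/* ... look up / process 'key' here ... */
		prev_key = key;
		err = bpf_map__get_next_key(map, &prev_key, &key, sizeof(key));
	}
	/* loop ends once get_next_key reports there are no more keys */
}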

tools/testing/selftests/bpf/prog_tests/timer_mim.c

@@ -35,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel)
 	ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok");
 
 	close(bpf_map__fd(timer_skel->maps.inner_htab));
-	err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1);
+	err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0);
 	ASSERT_EQ(err, 0, "delete inner map");
 
 	/* check that timer_cb[12] are no longer running */