Merge branch 'Introduce access remote cpu elem support in BPF percpu map'
Feng Zhou says: ==================== From: Feng Zhou <zhoufeng.zf@bytedance.com> Tracing some functions, such as enqueue_task_fair, requires access to a specific CPU's element rather than the current CPU's, and bpf_map_lookup_elem on a percpu map cannot do that. So add bpf_map_lookup_percpu_elem to accomplish it for percpu_array_map, percpu_hash_map, and lru_percpu_hash_map. v1->v2: Addressed comments from Alexei Starovoitov. - add a selftest for bpf_map_lookup_percpu_elem. ==================== Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
Коммит
0bed8f374a
|
@ -89,6 +89,7 @@ struct bpf_map_ops {
|
|||
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
|
||||
int (*map_pop_elem)(struct bpf_map *map, void *value);
|
||||
int (*map_peek_elem)(struct bpf_map *map, void *value);
|
||||
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
|
||||
|
||||
/* funcs called by prog_array and perf_event_array map */
|
||||
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
|
||||
|
@ -2184,6 +2185,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
|
|||
extern const struct bpf_func_proto bpf_map_push_elem_proto;
|
||||
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
|
||||
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
|
||||
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
|
||||
|
||||
extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
|
||||
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
|
||||
|
|
|
@ -5164,6 +5164,14 @@ union bpf_attr {
|
|||
* if not NULL, is a reference which must be released using its
|
||||
* corresponding release function, or moved into a BPF map before
|
||||
* program exit.
|
||||
*
|
||||
* void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
|
||||
* Description
|
||||
* Perform a lookup in *percpu map* for an entry associated to
|
||||
* *key* on *cpu*.
|
||||
* Return
|
||||
* Map value associated to *key* on *cpu*, or **NULL** if no entry
|
||||
* was found or *cpu* is invalid.
|
||||
*/
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
|
@ -5361,6 +5369,7 @@ union bpf_attr {
|
|||
FN(skb_set_tstamp), \
|
||||
FN(ima_file_hash), \
|
||||
FN(kptr_xchg), \
|
||||
FN(map_lookup_percpu_elem), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
|
|
|
@ -243,6 +243,20 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
|
|||
return this_cpu_ptr(array->pptrs[index & array->index_mask]);
|
||||
}
|
||||
|
||||
/*
 * Like percpu_array_map_lookup_elem(), but return the value stored for
 * @key on the CPU given by @cpu rather than on the current CPU.
 *
 * Returns NULL when @cpu is out of range or @key is past max_entries.
 */
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	/* @cpu is program-controlled (ARG_ANYTHING in the helper proto),
	 * so it must be validated before being handed to per_cpu_ptr().
	 */
	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	/* NOTE(review): masking with index_mask after the bounds check
	 * presumably mirrors the speculation hardening used by the other
	 * array lookup paths - confirm against percpu_array_map_lookup_elem().
	 */
	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}
|
||||
|
||||
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
|
||||
{
|
||||
struct bpf_array *array = container_of(map, struct bpf_array, map);
|
||||
|
@ -725,6 +739,7 @@ const struct bpf_map_ops percpu_array_map_ops = {
|
|||
.map_lookup_elem = percpu_array_map_lookup_elem,
|
||||
.map_update_elem = array_map_update_elem,
|
||||
.map_delete_elem = array_map_delete_elem,
|
||||
.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
|
||||
.map_seq_show_elem = percpu_array_map_seq_show_elem,
|
||||
.map_check_btf = array_map_check_btf,
|
||||
.map_lookup_batch = generic_map_lookup_batch,
|
||||
|
|
|
@ -2619,6 +2619,7 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
|
|||
const struct bpf_func_proto bpf_map_push_elem_proto __weak;
|
||||
const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
|
||||
const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
|
||||
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
|
||||
const struct bpf_func_proto bpf_spin_lock_proto __weak;
|
||||
const struct bpf_func_proto bpf_spin_unlock_proto __weak;
|
||||
const struct bpf_func_proto bpf_jiffies64_proto __weak;
|
||||
|
|
|
@ -2199,6 +2199,20 @@ static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Look up @key in a BPF_MAP_TYPE_PERCPU_HASH map and return the value
 * slot belonging to @cpu, or NULL when @cpu is invalid or @key is absent.
 */
static void *htab_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	/* Validate the program-supplied CPU id before per_cpu_ptr(). */
	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l)
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	else
		return NULL;
}
|
||||
|
||||
static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct htab_elem *l = __htab_map_lookup_elem(map, key);
|
||||
|
@ -2211,6 +2225,22 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * LRU variant of htab_percpu_map_lookup_percpu_elem(): on a hit the
 * element is additionally flagged as referenced so the LRU machinery
 * treats it as recently used.
 */
static void *htab_lru_percpu_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct htab_elem *l;

	/* Validate the program-supplied CPU id before per_cpu_ptr(). */
	if (cpu >= nr_cpu_ids)
		return NULL;

	l = __htab_map_lookup_elem(map, key);
	if (l) {
		/* A lookup counts as a use for eviction purposes. */
		bpf_lru_node_set_ref(&l->lru_node);
		return per_cpu_ptr(htab_elem_get_ptr(l, map->key_size), cpu);
	}

	return NULL;
}
|
||||
|
||||
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
|
||||
{
|
||||
struct htab_elem *l;
|
||||
|
@ -2300,6 +2330,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
|
|||
.map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
|
||||
.map_update_elem = htab_percpu_map_update_elem,
|
||||
.map_delete_elem = htab_map_delete_elem,
|
||||
.map_lookup_percpu_elem = htab_percpu_map_lookup_percpu_elem,
|
||||
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
|
||||
.map_set_for_each_callback_args = map_set_for_each_callback_args,
|
||||
.map_for_each_callback = bpf_for_each_hash_elem,
|
||||
|
@ -2318,6 +2349,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
|
|||
.map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
|
||||
.map_update_elem = htab_lru_percpu_map_update_elem,
|
||||
.map_delete_elem = htab_lru_map_delete_elem,
|
||||
.map_lookup_percpu_elem = htab_lru_percpu_map_lookup_percpu_elem,
|
||||
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
|
||||
.map_set_for_each_callback_args = map_set_for_each_callback_args,
|
||||
.map_for_each_callback = bpf_for_each_hash_elem,
|
||||
|
|
|
@ -119,6 +119,22 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
|
|||
.arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
|
||||
};
|
||||
|
||||
/*
 * bpf_map_lookup_percpu_elem() helper body: look up @key in @map and
 * return the value for the requested @cpu (not necessarily the current
 * one).  Must run under RCU or RCU-bh (warned otherwise).  The pointer
 * is returned as unsigned long per the BPF helper calling convention;
 * per-map validation of @cpu happens inside map_lookup_percpu_elem.
 */
BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
{
	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
}
|
||||
|
||||
/*
 * Verifier prototype for bpf_map_lookup_percpu_elem(): the return is
 * PTR_TO_MAP_VALUE_OR_NULL, so programs must NULL-check the result.
 * arg3 (cpu) is ARG_ANYTHING - it is range-checked at runtime by each
 * map's map_lookup_percpu_elem implementation.
 */
const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
	.func = bpf_map_lookup_percpu_elem,
	.gpl_only = false,
	/* NOTE(review): pkt_access appears to mirror bpf_map_lookup_elem_proto
	 * (packet pointers allowed as map key) - confirm. */
	.pkt_access = true,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_MAP_KEY,
	.arg3_type = ARG_ANYTHING,
};
|
||||
|
||||
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
|
||||
.func = bpf_user_rnd_u32,
|
||||
.gpl_only = false,
|
||||
|
@ -1420,6 +1436,8 @@ bpf_base_func_proto(enum bpf_func_id func_id)
|
|||
return &bpf_map_pop_elem_proto;
|
||||
case BPF_FUNC_map_peek_elem:
|
||||
return &bpf_map_peek_elem_proto;
|
||||
case BPF_FUNC_map_lookup_percpu_elem:
|
||||
return &bpf_map_lookup_percpu_elem_proto;
|
||||
case BPF_FUNC_get_prandom_u32:
|
||||
return &bpf_get_prandom_u32_proto;
|
||||
case BPF_FUNC_get_smp_processor_id:
|
||||
|
|
|
@ -6137,6 +6137,12 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
|
|||
map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_map_lookup_percpu_elem:
|
||||
if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
|
||||
map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
|
||||
map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
|
||||
goto error;
|
||||
break;
|
||||
case BPF_FUNC_sk_storage_get:
|
||||
case BPF_FUNC_sk_storage_delete:
|
||||
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
|
||||
|
@ -6750,7 +6756,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
|
|||
func_id != BPF_FUNC_map_pop_elem &&
|
||||
func_id != BPF_FUNC_map_peek_elem &&
|
||||
func_id != BPF_FUNC_for_each_map_elem &&
|
||||
func_id != BPF_FUNC_redirect_map)
|
||||
func_id != BPF_FUNC_redirect_map &&
|
||||
func_id != BPF_FUNC_map_lookup_percpu_elem)
|
||||
return 0;
|
||||
|
||||
if (map == NULL) {
|
||||
|
@ -13810,7 +13817,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
|||
insn->imm == BPF_FUNC_map_pop_elem ||
|
||||
insn->imm == BPF_FUNC_map_peek_elem ||
|
||||
insn->imm == BPF_FUNC_redirect_map ||
|
||||
insn->imm == BPF_FUNC_for_each_map_elem)) {
|
||||
insn->imm == BPF_FUNC_for_each_map_elem ||
|
||||
insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
|
||||
aux = &env->insn_aux_data[i + delta];
|
||||
if (bpf_map_ptr_poisoned(aux))
|
||||
goto patch_call_imm;
|
||||
|
@ -13859,6 +13867,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
|
|||
bpf_callback_t callback_fn,
|
||||
void *callback_ctx,
|
||||
u64 flags))NULL));
|
||||
BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
|
||||
(void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
|
||||
|
||||
patch_map_ops_generic:
|
||||
switch (insn->imm) {
|
||||
|
@ -13886,6 +13896,9 @@ patch_map_ops_generic:
|
|||
case BPF_FUNC_for_each_map_elem:
|
||||
insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
|
||||
continue;
|
||||
case BPF_FUNC_map_lookup_percpu_elem:
|
||||
insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
|
||||
continue;
|
||||
}
|
||||
|
||||
goto patch_call_imm;
|
||||
|
|
|
@ -1197,6 +1197,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
|
|||
return &bpf_map_pop_elem_proto;
|
||||
case BPF_FUNC_map_peek_elem:
|
||||
return &bpf_map_peek_elem_proto;
|
||||
case BPF_FUNC_map_lookup_percpu_elem:
|
||||
return &bpf_map_lookup_percpu_elem_proto;
|
||||
case BPF_FUNC_ktime_get_ns:
|
||||
return &bpf_ktime_get_ns_proto;
|
||||
case BPF_FUNC_ktime_get_boot_ns:
|
||||
|
|
|
@ -5164,6 +5164,14 @@ union bpf_attr {
|
|||
* if not NULL, is a reference which must be released using its
|
||||
* corresponding release function, or moved into a BPF map before
|
||||
* program exit.
|
||||
*
|
||||
* void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
|
||||
* Description
|
||||
* Perform a lookup in *percpu map* for an entry associated to
|
||||
* *key* on *cpu*.
|
||||
* Return
|
||||
* Map value associated to *key* on *cpu*, or **NULL** if no entry
|
||||
* was found or *cpu* is invalid.
|
||||
*/
|
||||
#define __BPF_FUNC_MAPPER(FN) \
|
||||
FN(unspec), \
|
||||
|
@ -5361,6 +5369,7 @@ union bpf_attr {
|
|||
FN(skb_set_tstamp), \
|
||||
FN(ima_file_hash), \
|
||||
FN(kptr_xchg), \
|
||||
FN(map_lookup_percpu_elem), \
|
||||
/* */
|
||||
|
||||
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2022 Bytedance
|
||||
|
||||
#include <test_progs.h>
|
||||
|
||||
#include "test_map_lookup_percpu_elem.skel.h"
|
||||
|
||||
#define TEST_VALUE 1
|
||||
|
||||
void test_map_lookup_percpu_elem(void)
|
||||
{
|
||||
struct test_map_lookup_percpu_elem *skel;
|
||||
int key = 0, ret;
|
||||
int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
|
||||
int *buf;
|
||||
|
||||
buf = (int *)malloc(nr_cpus*sizeof(int));
|
||||
if (!ASSERT_OK_PTR(buf, "malloc"))
|
||||
return;
|
||||
memset(buf, 0, nr_cpus*sizeof(int));
|
||||
buf[0] = TEST_VALUE;
|
||||
|
||||
skel = test_map_lookup_percpu_elem__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
|
||||
return;
|
||||
ret = test_map_lookup_percpu_elem__attach(skel);
|
||||
ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
|
||||
|
||||
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
|
||||
ASSERT_OK(ret, "percpu_array_map update");
|
||||
|
||||
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
|
||||
ASSERT_OK(ret, "percpu_hash_map update");
|
||||
|
||||
ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
|
||||
ASSERT_OK(ret, "percpu_lru_hash_map update");
|
||||
|
||||
syscall(__NR_getuid);
|
||||
|
||||
ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
|
||||
skel->bss->percpu_hash_elem_val == TEST_VALUE &&
|
||||
skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
|
||||
ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");
|
||||
|
||||
test_map_lookup_percpu_elem__destroy(skel);
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2022 Bytedance
|
||||
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
/* Result slots written by the tracepoint program in this file and read
 * back by the userspace test via skel->bss. */
int percpu_array_elem_val = 0;
int percpu_hash_elem_val = 0;
int percpu_lru_hash_elem_val = 0;

/* One single-entry map of each percpu type that supports the
 * bpf_map_lookup_percpu_elem() helper. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} percpu_array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} percpu_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} percpu_lru_hash_map SEC(".maps");
|
||||
|
||||
/*
 * Runs on every getuid() syscall entry.  Reads key 0's value for CPU 0
 * from each percpu map via bpf_map_lookup_percpu_elem() and publishes
 * the results in the global result slots for userspace to verify.
 */
SEC("tp/syscalls/sys_enter_getuid")
int sysenter_getuid(const void *ctx)
{
	__u32 key = 0;
	__u32 cpu = 0;	/* always query CPU 0, regardless of where we run */
	__u32 *value;

	/* Each lookup may return NULL (e.g. missing key), so NULL-check
	 * before dereferencing - the verifier requires it. */
	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
	if (value)
		percpu_array_elem_val = *value;

	value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
	if (value)
		percpu_hash_elem_val = *value;

	value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
	if (value)
		percpu_lru_hash_elem_val = *value;

	return 0;
}
|
||||
|
||||
char _license[] SEC("license") = "GPL";
|
Загрузка…
Ссылка в новой задаче