bpf: Add hashtab support for bpf_for_each_map_elem() helper
This patch adds support for hashmap, percpu hashmap, LRU hashmap and percpu LRU hashmap.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210226204927.3885020-1-yhs@fb.com
Parent: 69c087ba62
Commit: 314ee05e2f
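Before the diff, a minimal BPF-program-side sketch of what this patch enables, assuming a libbpf-style program on a kernel carrying this series; the map, section, and function names are illustrative, not part of the patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Illustrative hash map; any of the four map types covered here works. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

struct cb_ctx {
	__u64 sum;
};

/* Callback with the shape set up by map_set_for_each_callback_args()
 * in the verifier hunk below: (map, key, value, callback_ctx).
 * Return 0 to continue iterating, 1 to stop.
 */
static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
		      struct cb_ctx *data)
{
	data->sum += *val;
	return 0;
}

SEC("tc")
int sum_counts(struct __sk_buff *skb)
{
	struct cb_ctx data = { .sum = 0 };

	/* flags must be 0; the helper returns the number of elements visited */
	bpf_for_each_map_elem(&counts, sum_elem, &data, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";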
include/linux/bpf.h
@@ -1397,6 +1397,10 @@ void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
 				struct bpf_link_info *info);
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+				   struct bpf_func_state *caller,
+				   struct bpf_func_state *callee);
+
 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
kernel/bpf/hashtab.c
@@ -1869,6 +1869,63 @@ static const struct bpf_iter_seq_info iter_seq_info = {
 	.seq_priv_size		= sizeof(struct bpf_iter_seq_hash_map_info),
 };
 
+static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
+				  void *callback_ctx, u64 flags)
+{
+	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+	struct hlist_nulls_head *head;
+	struct hlist_nulls_node *n;
+	struct htab_elem *elem;
+	u32 roundup_key_size;
+	int i, num_elems = 0;
+	void __percpu *pptr;
+	struct bucket *b;
+	void *key, *val;
+	bool is_percpu;
+	u64 ret = 0;
+
+	if (flags != 0)
+		return -EINVAL;
+
+	is_percpu = htab_is_percpu(htab);
+
+	roundup_key_size = round_up(map->key_size, 8);
+	/* disable migration so percpu value prepared here will be the
+	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
+	 */
+	if (is_percpu)
+		migrate_disable();
+	for (i = 0; i < htab->n_buckets; i++) {
+		b = &htab->buckets[i];
+		rcu_read_lock();
+		head = &b->head;
+		hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
+			key = elem->key;
+			if (is_percpu) {
+				/* current cpu value for percpu map */
+				pptr = htab_elem_get_ptr(elem, map->key_size);
+				val = this_cpu_ptr(pptr);
+			} else {
+				val = elem->key + roundup_key_size;
+			}
+			num_elems++;
+			ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
+					(u64)(long)key, (u64)(long)val,
+					(u64)(long)callback_ctx, 0);
+			/* return value: 0 - continue, 1 - stop and return */
+			if (ret) {
+				rcu_read_unlock();
+				goto out;
+			}
+		}
+		rcu_read_unlock();
+	}
+out:
+	if (is_percpu)
+		migrate_enable();
+	return num_elems;
+}
+
 static int htab_map_btf_id;
 const struct bpf_map_ops htab_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
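The "0 - continue, 1 - stop and return" contract in the loop above is what lets a callback cut the walk short. A sketch of such a callback, with illustrative names and the same assumed includes as the earlier example:

struct find_ctx {
	__u64 needle;
	__u32 found_key;
	bool found;
};

static __u64 find_elem(struct bpf_map *map, __u32 *key, __u64 *val,
		       struct find_ctx *data)
{
	if (*val == data->needle) {
		data->found_key = *key;
		data->found = true;
		return 1;	/* non-zero: bpf_for_each_hash_elem() breaks out */
	}
	return 0;		/* continue with the next element */
}

Note that even on an early stop the helper still returns num_elems, the count of elements visited so far, not the callback's return value.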
@@ -1881,6 +1938,8 @@ const struct bpf_map_ops htab_map_ops = {
 	.map_delete_elem = htab_map_delete_elem,
 	.map_gen_lookup = htab_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_map_btf_id,
@@ -1900,6 +1959,8 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
 	.map_seq_show_elem = htab_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_lru),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_lru_map_btf_id,
@@ -2019,6 +2080,8 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 	.map_update_elem = htab_percpu_map_update_elem,
 	.map_delete_elem = htab_map_delete_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_percpu),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_percpu_map_btf_id,
@@ -2036,6 +2099,8 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_update_elem = htab_lru_percpu_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_seq_show_elem = htab_percpu_map_seq_show_elem,
+	.map_set_for_each_callback_args = map_set_for_each_callback_args,
+	.map_for_each_callback = bpf_for_each_hash_elem,
 	BATCH_OPS(htab_lru_percpu),
 	.map_btf_name = "bpf_htab",
 	.map_btf_id = &htab_lru_percpu_map_btf_id,
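All four ops tables above route .map_for_each_callback to the same bpf_for_each_hash_elem(). For the percpu variants, the migrate_disable() pairing means the callback sees the current CPU's value slot, just as bpf_map_lookup_elem() would in the same program. A sketch with illustrative names:

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} pcpu_counts SEC(".maps");

static __u64 bump_elem(struct bpf_map *map, __u32 *key, __u64 *val,
		       void *ctx)
{
	/* *val is this CPU's slot only, the same slot a
	 * bpf_map_lookup_elem() in this program would return.
	 */
	*val += 1;
	return 0;
}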
kernel/bpf/verifier.c
@@ -5403,6 +5403,33 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	return 0;
 }
 
+int map_set_for_each_callback_args(struct bpf_verifier_env *env,
+				   struct bpf_func_state *caller,
+				   struct bpf_func_state *callee)
+{
+	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
+	 *	  void *callback_ctx, u64 flags);
+	 * callback_fn(struct bpf_map *map, void *key, void *value,
+	 *	  void *callback_ctx);
+	 */
+	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
+
+	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
+	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
+	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
+	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
+
+	/* pointer to stack or null */
+	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
+
+	/* unused */
+	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
+	return 0;
+}
+
 static int set_callee_state(struct bpf_verifier_env *env,
 			    struct bpf_func_state *caller,
 			    struct bpf_func_state *callee, int insn_idx)
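Summarizing the register setup above from the callback's point of view (an editorial summary, not part of the patch):

/*
 * Calling convention established for the callback:
 *
 *   R1 = struct bpf_map *       copied from the helper's map argument
 *   R2 = void *key              PTR_TO_MAP_KEY, tied to R1's map
 *   R3 = void *value            PTR_TO_MAP_VALUE, tied to R1's map
 *   R4 = void *callback_ctx     caller's R3: pointer to stack or NULL
 *   R5                          unused, marked uninitialized
 */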