bpf: Remove redundant synchronize_rcu.

bpf_free_used_maps() or close(map_fd) will trigger map_free callback.
bpf_free_used_maps() is called after bpf prog is no longer executing:
bpf_prog_put->call_rcu->bpf_prog_free->bpf_free_used_maps.
Hence there is no need to call synchronize_rcu() to protect map elements.

Note that hash_of_maps and array_of_maps update/delete inner maps via
sys_bpf() that calls maybe_wait_bpf_programs() and synchronize_rcu().

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Link: https://lore.kernel.org/bpf/20200630043343.53195-2-alexei.starovoitov@gmail.com
Author:  Alexei Starovoitov
Date:    2020-06-29 21:33:39 -07:00
Parent:  8c18311067
Commit:  bba1dc0b55
7 changed files with 3 additions and 38 deletions
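
The teardown ordering the commit message relies on can be summarized with a short sketch. This is an illustrative reduction, not the kernel's exact code: prog_put() and prog_free_rcu() are stand-in names for the bpf_prog_put -> call_rcu -> bpf_prog_free -> bpf_free_used_maps chain quoted above.

/* Sketch with assumed names: the last program reference defers the real
 * free through call_rcu(), so by the time bpf_free_used_maps() can drop
 * map references and reach a map's ->map_free() callback, every RCU
 * read-side critical section that might still have been executing the
 * program (and touching its maps) has already completed.
 */
static void prog_free_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	bpf_free_used_maps(aux);	/* drops map refcnts; may invoke ->map_free() */
	bpf_prog_free(aux->prog);
}

static void prog_put(struct bpf_prog *prog)
{
	if (atomic64_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, prog_free_rcu);	/* runs only after a full grace period */
}

The note about hash_of_maps and array_of_maps covers the remaining case: inner maps are only replaced or deleted through the bpf(2) syscall, and that path already calls maybe_wait_bpf_programs(), which issues a synchronize_rcu() for those map types before returning to user space.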

kernel/bpf/arraymap.c

@@ -386,13 +386,6 @@ static void array_map_free(struct bpf_map *map)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-	 * so the programs (can be more than one that used this map) were
-	 * disconnected from events. Wait for outstanding programs to complete
-	 * and free the array
-	 */
-	synchronize_rcu();
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
 		bpf_array_free_percpu(array);
@@ -546,8 +539,6 @@ static void fd_array_map_free(struct bpf_map *map)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
-	synchronize_rcu();
 	/* make sure it's empty */
 	for (i = 0; i < array->map.max_entries; i++)
 		BUG_ON(array->ptrs[i] != NULL);

kernel/bpf/hashtab.c

@@ -1290,12 +1290,10 @@ static void htab_map_free(struct bpf_map *map)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-	 * so the programs (can be more than one that used this map) were
-	 * disconnected from events. Wait for outstanding critical sections in
-	 * these programs to complete
+	/* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
+	 * bpf_free_used_maps() is called after bpf prog is no longer executing.
+	 * There is no need to synchronize_rcu() here to protect map elements.
 	 */
-	synchronize_rcu();
 	/* some of free_htab_elem() callbacks for elements of this map may
 	 * not have executed. Wait for them.

kernel/bpf/lpm_trie.c

@@ -589,11 +589,6 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
-	/* Wait for outstanding programs to complete
-	 * update/lookup/delete/get_next_key and free the trie.
-	 */
-	synchronize_rcu();
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
 	 * and start over.

kernel/bpf/queue_stack_maps.c

@@ -101,13 +101,6 @@ static void queue_stack_map_free(struct bpf_map *map)
 {
 	struct bpf_queue_stack *qs = bpf_queue_stack(map);
-	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-	 * so the programs (can be more than one that used this map) were
-	 * disconnected from events. Wait for outstanding critical sections in
-	 * these programs to complete
-	 */
-	synchronize_rcu();
 	bpf_map_area_free(qs);
 }

kernel/bpf/reuseport_array.c

@@ -96,8 +96,6 @@ static void reuseport_array_free(struct bpf_map *map)
 	struct sock *sk;
 	u32 i;
-	synchronize_rcu();
 	/*
 	 * ops->map_*_elem() will not be able to access this
 	 * array now. Hence, this function only races with

kernel/bpf/ringbuf.c

@@ -215,13 +215,6 @@ static void ringbuf_map_free(struct bpf_map *map)
 {
 	struct bpf_ringbuf_map *rb_map;
-	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-	 * so the programs (can be more than one that used this map) were
-	 * disconnected from events. Wait for outstanding critical sections in
-	 * these programs to complete
-	 */
-	synchronize_rcu();
 	rb_map = container_of(map, struct bpf_ringbuf_map, map);
 	bpf_ringbuf_free(rb_map->rb);
 	kfree(rb_map);

kernel/bpf/stackmap.c

@@ -604,9 +604,6 @@ static void stack_map_free(struct bpf_map *map)
 {
 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
-	/* wait for bpf programs to complete before freeing stack map */
-	synchronize_rcu();
 	bpf_map_area_free(smap->elems);
 	pcpu_freelist_destroy(&smap->freelist);
 	bpf_map_area_free(smap);