bpf: Remove bpf_selem_free_fields*_rcu

This patch removes bpf_selem_free_fields*_rcu. bpf_obj_free_fields()
can be done before the call_rcu_tasks_trace() and kfree_rcu(). This is
needed when a later patch uses bpf_mem_cache_alloc/free. In the bpf
hashtab, bpf_obj_free_fields() is also called before calling
bpf_mem_cache_free. The discussion can be found in
https://lore.kernel.org/bpf/f67021ee-21d9-bfae-6134-4ca542fab843@linux.dev/

Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20230308065936.1550103-8-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Martin KaFai Lau authored 2023-03-07 22:59:26 -08:00, committed by Alexei Starovoitov
Parent a47eabf216
Commit c609981342
1 changed file with 5 additions and 62 deletions

kernel/bpf/bpf_local_storage.c

@@ -109,27 +109,6 @@ static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
 	kfree_rcu(local_storage, rcu);
 }
 
-static void bpf_selem_free_fields_rcu(struct rcu_head *rcu)
-{
-	struct bpf_local_storage_elem *selem;
-	struct bpf_local_storage_map *smap;
-
-	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
-	/* protected by the rcu_barrier*() */
-	smap = rcu_dereference_protected(SDATA(selem)->smap, true);
-	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
-	kfree(selem);
-}
-
-static void bpf_selem_free_fields_trace_rcu(struct rcu_head *rcu)
-{
-	/* Free directly if Tasks Trace RCU GP also implies RCU GP */
-	if (rcu_trace_implies_rcu_gp())
-		bpf_selem_free_fields_rcu(rcu);
-	else
-		call_rcu(rcu, bpf_selem_free_fields_rcu);
-}
-
 static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
 {
 	struct bpf_local_storage_elem *selem;
@@ -151,7 +130,6 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
 {
 	struct bpf_local_storage_map *smap;
 	bool free_local_storage;
-	struct btf_record *rec;
 	void *owner;
 
 	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
@@ -192,26 +170,11 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
 	    SDATA(selem))
 		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
 
-	/* A different RCU callback is chosen whenever we need to free
-	 * additional fields in selem data before freeing selem.
-	 * bpf_local_storage_map_free only executes rcu_barrier to wait for RCU
-	 * callbacks when it has special fields, hence we can only conditionally
-	 * dereference smap, as by this time the map might have already been
-	 * freed without waiting for our call_rcu callback if it did not have
-	 * any special fields.
-	 */
-	rec = smap->map.record;
-	if (!reuse_now) {
-		if (!IS_ERR_OR_NULL(rec))
-			call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_fields_trace_rcu);
-		else
-			call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
-	} else {
-		if (!IS_ERR_OR_NULL(rec))
-			call_rcu(&selem->rcu, bpf_selem_free_fields_rcu);
-		else
-			kfree_rcu(selem, rcu);
-	}
+	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
+	if (!reuse_now)
+		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
+	else
+		kfree_rcu(selem, rcu);
 
 	if (rcu_access_pointer(local_storage->smap) == smap)
 		RCU_INIT_POINTER(local_storage->smap, NULL);
@@ -769,26 +732,6 @@ void bpf_local_storage_map_free(struct bpf_map *map,
 	 */
 	synchronize_rcu();
 
-	/* Only delay freeing of smap, buckets are not needed anymore */
 	kvfree(smap->buckets);
-
-	/* When local storage has special fields, callbacks for
-	 * bpf_selem_free_fields_rcu and bpf_selem_free_fields_trace_rcu will
-	 * keep using the map BTF record, we need to execute an RCU barrier to
-	 * wait for them as the record will be freed right after our map_free
-	 * callback.
-	 */
-	if (!IS_ERR_OR_NULL(smap->map.record)) {
-		rcu_barrier_tasks_trace();
-		/* We cannot skip rcu_barrier() when rcu_trace_implies_rcu_gp()
-		 * is true, because while call_rcu invocation is skipped in that
-		 * case in bpf_selem_free_fields_trace_rcu (and all local
-		 * storage maps pass reuse_now = false), there can be
-		 * call_rcu callbacks based on reuse_now = true in the
-		 * while ((selem = ...)) loop above or when owner's free path
-		 * calls bpf_local_storage_unlink_nolock.
-		 */
-		rcu_barrier();
-	}
 	bpf_map_area_free(smap);
 }
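
For reference, the element-free path in bpf_selem_unlink_storage_nolock() after this patch boils down to the following condensed sketch (restating the added lines in the diff above; SDATA() and bpf_selem_free_trace_rcu() are existing helpers in bpf_local_storage.c):

	/* The special fields described by the map's btf_record are freed
	 * synchronously, before the element memory is handed to RCU. This is
	 * why the dedicated bpf_selem_free_fields*_rcu callbacks and the extra
	 * rcu_barrier*() calls in bpf_local_storage_map_free() are no longer
	 * needed to keep the btf_record alive.
	 */
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
	if (!reuse_now)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	else
		kfree_rcu(selem, rcu);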