WSL2-Linux-Kernel/include/linux/cookie.h

bpf, net: Rework cookie generator as per-cpu one

With its use in BPF, the cookie generator can be called very frequently
in particular when used out of cgroup v2 hooks (e.g. connect / sendmsg)
and attached to the root cgroup, for example, when used in v1/v2 mixed
environments. In particular, when there's a high churn on sockets in the
system there can be many parallel requests to the bpf_get_socket_cookie()
and bpf_get_netns_cookie() helpers which then cause contention on the
atomic counter.

As similarly done in f991bd2e1421 ("fs: introduce a per-cpu last_ino
allocator"), add a small helper library that both can use for the 64 bit
counters.

Given this can be called from different contexts, we also need to deal
with potential nested calls even though in practice they are considered
extremely rare. One idea as suggested by Eric Dumazet was to use a
reverse counter for this situation since we don't expect 64 bit overflows
anyways; that way, we can avoid bigger gaps in the 64 bit counter space
compared to just batch-wise increase.

Even on machines with a small number of cores (e.g. 4), the cookie
generation shrinks from min/max/med/avg (ns) of 22/50/40/38.9 down to
10/35/14/17.3 when run in parallel from multiple CPUs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Link: https://lore.kernel.org/bpf/8a80b8d27d3c49f9a14e1d5213c19d8be87d1dc8.1601477936.git.daniel@iogearbox.net
2020-09-30 18:18:16 +03:00
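
A minimal usage sketch (hypothetical consumer, not part of this header or
the kernel tree; example_cookie and example_gen_cookie are made-up names
for illustration): a subsystem defines one generator with DEFINE_COOKIE()
and then draws 64 bit cookies from any context through gen_cookie_next().

#include <linux/cookie.h>
#include <linux/types.h>

/* One per-cpu backed cookie generator for this hypothetical subsystem. */
DEFINE_COOKIE(example_cookie);

u64 example_gen_cookie(void)
{
	/*
	 * Safe from process context, softirq, and even a nested
	 * context interrupting an allocation in progress; a non-zero
	 * value that does not repeat within 64 bit space is returned
	 * either way.
	 */
	return gen_cookie_next(&example_cookie);
}

Per the commit message, bpf_get_socket_cookie() and bpf_get_netns_cookie()
are the intended consumers of this pattern, each backed by a generator of
its own. The header itself follows.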
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COOKIE_H
#define __LINUX_COOKIE_H

#include <linux/atomic.h>
#include <linux/percpu.h>
#include <asm/local.h>

/*
 * Per-cpu state: the most recently handed-out value of this CPU's
 * reserved batch ("last") plus a nesting counter that detects
 * reentrant calls on the same CPU.
 */
struct pcpu_gen_cookie {
	local_t nesting;
	u64 last;
} __aligned(16);

struct gen_cookie {
	struct pcpu_gen_cookie __percpu *local;
	atomic64_t forward_last ____cacheline_aligned_in_smp;
	atomic64_t reverse_last;
};

#define COOKIE_LOCAL_BATCH	4096

#define DEFINE_COOKIE(name)						\
	static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name);	\
	static struct gen_cookie name = {				\
		.local		= &__##name,				\
		.forward_last	= ATOMIC64_INIT(0),			\
		.reverse_last	= ATOMIC64_INIT(0),			\
	}

static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
{
	struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
	u64 val;

	if (likely(local_inc_return(&local->nesting) == 1)) {
		/* Fast path: non-nested, allocate from the local batch. */
		val = local->last;
		if (__is_defined(CONFIG_SMP) &&
		    unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
			/*
			 * Local batch exhausted: claim the next window
			 * of COOKIE_LOCAL_BATCH values from the shared
			 * forward counter.
			 */
			s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
						       &gc->forward_last);
			val = next - COOKIE_LOCAL_BATCH;
		}
		local->last = ++val;
	} else {
		/*
		 * Nested call interrupting an allocation on this CPU:
		 * count down from the top of the 64 bit space instead,
		 * so no gap is punched into the forward batches.
		 */
		val = atomic64_dec_return(&gc->reverse_last);
	}
	local_dec(&local->nesting);
	return val;
}

#endif /* __LINUX_COOKIE_H */
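
A worked trace of the arithmetic above, as an illustration only: assume a
freshly initialized generator (per-cpu state and both atomics zeroed),
CONFIG_SMP=y, and no competing refills from other CPUs in between.

	/*
	 * 1st call:      val = 0 and (0 & 4095) == 0, so refill:
	 *                forward_last 0 -> 4096, val = 4096 - 4096 = 0,
	 *                local->last = 1, cookie 1 is returned. Cookie 0
	 *                is never handed out and can stand for "no cookie".
	 * calls 2..4096: cookies 2..4096 from purely local increments,
	 *                without touching the shared cacheline.
	 * call 4097:     val = 4096 hits the mask again and the CPU claims
	 *                the next window, here yielding [4097, 8192].
	 * nested call:   e.g. an interrupt arriving between the
	 *                local_inc_return() and local_dec() of a call in
	 *                progress sees nesting > 1 and gets 2^64 - 1,
	 *                then 2^64 - 2, ... from reverse_last.
	 */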