// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};

const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

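/* Look up the trampoline for @key in the global hash table, or allocate a
 * new one if none exists. Returns with a reference held; runs under
 * trampoline_mutex.
 */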
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

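/* If the attach address lives in a module, take a reference on that module
 * so its text cannot be freed while the trampoline is patched into it.
 */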
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

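/* Returns 1 if @ip is an ftrace patch site, 0 if not; WARNs and returns
 * -EFAULT if ftrace reports a patch site at a different address than @ip.
 */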
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

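/* A trampoline is wired to its target in one of two ways: through the
 * ftrace direct-call API when the target is an ftrace patch site, or by
 * poking the call instruction directly via bpf_arch_text_poke() otherwise.
 */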
/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

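/* Snapshot the progs currently attached to @tr into a freshly allocated
 * bpf_tramp_progs array, one entry per attach kind; *total is set to the
 * overall count. Caller holds tr->mutex and must kfree() the result.
 */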
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
			*progs++ = aux->prog;
	}
	return tprogs;
}

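/* Freeing a trampoline image is staged through a chain of callbacks:
 * __bpf_tramp_image_put_rcu_tasks (step 1) either kills the percpu_ref
 * (fexit case) or goes straight to __bpf_tramp_image_put_rcu;
 * __bpf_tramp_image_release (fexit step 2) runs once the percpu_ref hits
 * zero; __bpf_tramp_image_put_rcu (fexit step 3 / fentry step 2) defers
 * the final free to a workqueue so it runs in process context.
 */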
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is protected by:
	 * rcu_read_lock_trace for sleepable bpf progs,
	 * rcu_read_lock for normal bpf progs,
	 * percpu_ref for the trampoline itself, and
	 * rcu tasks for the trampoline asm not covered by percpu_ref
	 * (the few asm insns before __bpf_tramp_enter and
	 * after __bpf_tramp_exit).
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() again to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of the trampoline
	 * asm and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

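/* Allocate a one-page trampoline image, charge it against the JIT memory
 * limit, and publish it as a ksym named "bpf_trampoline_<key>_<idx>" so it
 * is visible to kallsyms and perf.
 */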
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

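/* Regenerate the trampoline image from the current set of attached progs
 * and atomically switch the target's call site over to it. With no progs
 * left, the fentry patch is removed entirely. Called with tr->mutex held.
 */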
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

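/* Map a prog's expected_attach_type to the trampoline slot it occupies;
 * anything unrecognized is treated as a BPF_TRAMP_REPLACE extension prog.
 */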
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

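/* Attach @prog to @tr. Extension (freplace) progs take over the target
 * exclusively, so they cannot coexist with fentry/fexit progs on the same
 * trampoline, and at most BPF_MAX_TRAMP_PROGS fentry+fexit progs fit.
 */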
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

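/* Typical attach flow (sketch): resolve the target, then call
 * bpf_trampoline_get(key, &tgt_info) to create or find the trampoline,
 * bpf_trampoline_link_prog() to attach, and bpf_trampoline_unlink_prog()
 * plus bpf_trampoline_put() to detach and drop the reference.
 */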
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

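/* sched_clock() can legitimately return 0, and bpf_prog_start_time() must
 * also be able to signal "stats disabled", so 1 is reserved as a sentinel:
 * only values above NO_START_TIME are real start timestamps.
 */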
#define NO_START_TIME 1
static u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;

	stats = this_cpu_ptr(prog->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->misses++;
	u64_stats_update_end(&stats->syncp);
}

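/* prog->active is a per-cpu recursion counter: if the same prog is already
 * running on this cpu (e.g. its own execution reaches a function it is
 * attached to), the nested invocation is skipped and counted as a miss.
 */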
/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable(), which are required
 * for the trampoline. The sequence is:
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

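/* Called from the trampoline's generated asm around the call to the
 * original function, pinning the image with a percpu_ref so it cannot be
 * freed while the original function (and any fexit progs) still run.
 */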
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

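/* Weak default for architectures without trampoline JIT support; a
 * supporting arch provides its own implementation that emits the
 * trampoline code into [image, image_end).
 */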
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);