bpf: verifier: refactor check_attach_btf_id()

The check_attach_btf_id() function really does three things:

1. It performs a bunch of checks on the program to ensure that the
   attachment is valid.

2. It stores a bunch of state about the attachment being requested in
   the verifier environment and struct bpf_prog objects.

3. It allocates a trampoline for the attachment.

This patch splits out (1.) and (3.) into separate functions which will
perform the checks, but return the computed values instead of directly
modifying the environment. This is done in preparation for reusing the
checks when the actual attachment is happening, which will allow tracing
programs to have multiple (compatible) attachments.

This also fixes a bug where a bunch of checks were skipped if a trampoline
already existed for the tracing target.
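
For reference, after the split check_attach_btf_id() ends up as a thin
wrapper around the two new helpers. A condensed sketch of the resulting
flow (error handling and the RAW_TP/ITER/LSM special cases trimmed; the
full version is in the final hunk below):

    struct bpf_attach_target_info tgt_info = {};
    struct bpf_trampoline *tr;
    u64 key;
    int ret;

    ret = bpf_check_attach_target(&env->log, prog, tgt_prog,
                                  prog->aux->attach_btf_id, &tgt_info);
    if (ret)
        return ret;

    /* the check now only computes values; the caller stores them */
    prog->aux->attach_func_proto = tgt_info.tgt_type;
    prog->aux->attach_func_name = tgt_info.tgt_name;

    key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf_id);
    tr = bpf_trampoline_get(key, &tgt_info);
    if (!tr)
        return -ENOMEM;
    prog->aux->trampoline = tr;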

Fixes: 6ba43b761c ("bpf: Attachment verification for BPF_MODIFY_RETURN")
Fixes: 1e6c62a882 ("bpf: Introduce sleepable BPF programs")
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Toke Høiland-Jørgensen 2020-09-25 23:25:02 +02:00 committed by Alexei Starovoitov
Parent efc68158c4
Commit f7b12b6fea
4 changed files with 146 additions and 93 deletions


@@ -606,6 +606,13 @@ struct bpf_trampoline {
struct bpf_ksym ksym;
};
struct bpf_attach_target_info {
struct btf_func_model fmodel;
long tgt_addr;
const char *tgt_name;
const struct btf_type *tgt_type;
};
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
struct bpf_dispatcher_prog {
@@ -633,9 +640,10 @@ static __always_inline unsigned int bpf_dispatcher_nop_func(
return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
@@ -680,10 +688,6 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
return -ENOTSUPP;
@@ -692,6 +696,11 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)


@@ -450,4 +450,17 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
u32 btf_id)
{
return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
}
int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *prog,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info);
#endif /* _LINUX_BPF_VERIFIER_H */


@@ -65,7 +65,7 @@ static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
bpf_image_ksym_add(tr->image, ksym);
}
struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
struct bpf_trampoline *tr;
struct hlist_head *head;
@@ -336,6 +336,26 @@ out:
return err;
}
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
struct bpf_trampoline *tr;
tr = bpf_trampoline_lookup(key);
if (!tr)
return NULL;
mutex_lock(&tr->mutex);
if (tr->func.addr)
goto out;
memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
tr->func.addr = (void *)tgt_info->tgt_addr;
out:
mutex_unlock(&tr->mutex);
return tr;
}
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
if (!tr)


@@ -11203,11 +11203,10 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
#define SECURITY_PREFIX "security_"
static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
static int check_attach_modify_return(unsigned long addr, const char *func_name)
{
if (within_error_injection_list(addr) ||
!strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
sizeof(SECURITY_PREFIX) - 1))
!strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
return 0;
return -EINVAL;
@@ -11244,43 +11243,26 @@ static int check_non_sleepable_error_inject(u32 btf_id)
return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
}
static int check_attach_btf_id(struct bpf_verifier_env *env)
int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *prog,
const struct bpf_prog *tgt_prog,
u32 btf_id,
struct bpf_attach_target_info *tgt_info)
{
struct bpf_prog *prog = env->prog;
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
struct bpf_prog *tgt_prog = prog->aux->linked_prog;
struct bpf_verifier_log *log = &env->log;
u32 btf_id = prog->aux->attach_btf_id;
const char prefix[] = "btf_trace_";
struct btf_func_model fmodel;
int ret = 0, subprog = -1, i;
struct bpf_trampoline *tr;
const struct btf_type *t;
bool conservative = true;
const char *tname;
struct btf *btf;
long addr;
u64 key;
if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM) {
verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
return -EINVAL;
}
if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
return check_struct_ops_btf_id(env);
if (prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM &&
!prog_extension)
return 0;
long addr = 0;
if (!btf_id) {
bpf_log(log, "Tracing programs must provide btf_id\n");
return -EINVAL;
}
btf = bpf_prog_get_target_btf(prog);
btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
if (!btf) {
bpf_log(log,
"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
@@ -11320,8 +11302,6 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
"Extension programs should be JITed\n");
return -EINVAL;
}
env->ops = bpf_verifier_ops[tgt_prog->type];
prog->expected_attach_type = tgt_prog->expected_attach_type;
}
if (!tgt_prog->jited) {
bpf_log(log, "Can attach to only JITed progs\n");
@@ -11357,13 +11337,11 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
bpf_log(log, "Cannot extend fentry/fexit\n");
return -EINVAL;
}
key = ((u64)aux->id) << 32 | btf_id;
} else {
if (prog_extension) {
bpf_log(log, "Cannot replace kernel functions\n");
return -EINVAL;
}
key = btf_id;
}
switch (prog->expected_attach_type) {
@@ -11393,13 +11371,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
/* should never happen in valid vmlinux build */
return -EINVAL;
/* remember two read only pointers that are valid for
* the life time of the kernel
*/
prog->aux->attach_func_name = tname;
prog->aux->attach_func_proto = t;
prog->aux->attach_btf_trace = true;
return 0;
break;
case BPF_TRACE_ITER:
if (!btf_type_is_func(t)) {
bpf_log(log, "attach_btf_id %u is not a function\n",
@@ -11409,12 +11381,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
return -EINVAL;
prog->aux->attach_func_name = tname;
prog->aux->attach_func_proto = t;
if (!bpf_iter_prog_supported(prog))
return -EINVAL;
ret = btf_distill_func_proto(log, btf, t, tname, &fmodel);
return ret;
ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
if (ret)
return ret;
break;
default:
if (!prog_extension)
return -EINVAL;
@@ -11423,13 +11393,6 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
case BPF_LSM_MAC:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
prog->aux->attach_func_name = tname;
if (prog->type == BPF_PROG_TYPE_LSM) {
ret = bpf_lsm_verify_prog(log, prog);
if (ret < 0)
return ret;
}
if (!btf_type_is_func(t)) {
bpf_log(log, "attach_btf_id %u is not a function\n",
btf_id);
@@ -11441,24 +11404,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
return -EINVAL;
tr = bpf_trampoline_lookup(key);
if (!tr)
return -ENOMEM;
/* t is either vmlinux type or another program's type */
prog->aux->attach_func_proto = t;
mutex_lock(&tr->mutex);
if (tr->func.addr) {
prog->aux->trampoline = tr;
goto out;
}
if (tgt_prog && conservative) {
prog->aux->attach_func_proto = NULL;
if (tgt_prog && conservative)
t = NULL;
}
ret = btf_distill_func_proto(log, btf, t,
tname, &tr->func.model);
ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
if (ret < 0)
goto out;
return ret;
if (tgt_prog) {
if (subprog == 0)
addr = (long) tgt_prog->bpf_func;
@@ -11470,8 +11423,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
bpf_log(log,
"The address of function %s cannot be found\n",
tname);
ret = -ENOENT;
goto out;
return -ENOENT;
}
}
@@ -11496,30 +11448,89 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
default:
break;
}
if (ret)
bpf_log(log, "%s is not sleepable\n",
prog->aux->attach_func_name);
if (ret) {
bpf_log(log, "%s is not sleepable\n", tname);
return ret;
}
} else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
if (tgt_prog) {
bpf_log(log, "can't modify return codes of BPF programs\n");
ret = -EINVAL;
goto out;
return -EINVAL;
}
ret = check_attach_modify_return(addr, tname);
if (ret) {
bpf_log(log, "%s() is not modifiable\n", tname);
return ret;
}
ret = check_attach_modify_return(prog, addr);
if (ret)
bpf_log(log, "%s() is not modifiable\n",
prog->aux->attach_func_name);
}
if (ret)
goto out;
tr->func.addr = (void *)addr;
prog->aux->trampoline = tr;
out:
mutex_unlock(&tr->mutex);
if (ret)
bpf_trampoline_put(tr);
return ret;
break;
}
tgt_info->tgt_addr = addr;
tgt_info->tgt_name = tname;
tgt_info->tgt_type = t;
return 0;
}
static int check_attach_btf_id(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog;
struct bpf_prog *tgt_prog = prog->aux->linked_prog;
struct bpf_attach_target_info tgt_info = {};
u32 btf_id = prog->aux->attach_btf_id;
struct bpf_trampoline *tr;
int ret;
u64 key;
if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM) {
verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
return -EINVAL;
}
if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
return check_struct_ops_btf_id(env);
if (prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM &&
prog->type != BPF_PROG_TYPE_EXT)
return 0;
ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
if (ret)
return ret;
if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
env->ops = bpf_verifier_ops[tgt_prog->type];
prog->expected_attach_type = tgt_prog->expected_attach_type;
}
/* store info about the attachment target that will be used later */
prog->aux->attach_func_proto = tgt_info.tgt_type;
prog->aux->attach_func_name = tgt_info.tgt_name;
if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
prog->aux->attach_btf_trace = true;
return 0;
} else if (prog->expected_attach_type == BPF_TRACE_ITER) {
if (!bpf_iter_prog_supported(prog))
return -EINVAL;
return 0;
}
if (prog->type == BPF_PROG_TYPE_LSM) {
ret = bpf_lsm_verify_prog(&env->log, prog);
if (ret < 0)
return ret;
}
key = bpf_trampoline_compute_key(tgt_prog, btf_id);
tr = bpf_trampoline_get(key, &tgt_info);
if (!tr)
return -ENOMEM;
prog->aux->trampoline = tr;
return 0;
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,