bpf: reject program if a __user tagged memory accessed in kernel way
The BPF verifier supports direct memory access for BPF_PROG_TYPE_TRACING programs, e.g., a->b. If "a" is a pointer pointing to kernel memory, the verifier will allow the user to write C code like a->b and will translate it to a kernel load properly. If "a" is a pointer to user memory, the bpf developer is expected to use the bpf_probe_read_user() helper to get the value a->b. Without utilizing the BTF __user tagging information, the current verifier assumes that a->b is a kernel memory access, which may generate an incorrect result.

Now that BTF contains __user information, the verifier can check whether the pointer points to user memory or not. If it does, the verifier can reject the program and force users to use the bpf_probe_read_user() helper explicitly.

In the future, we can easily extend btf_add_space for other address space tagging, for example, rcu/percpu etc.

Signed-off-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20220127154606.654961-1-yhs@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
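For illustration, a minimal BPF-side sketch of the access pattern this change enforces. It is not part of this commit: "foo", "struct foo_conf", and the fentry attach point are hypothetical stand-ins for any kernel function whose pointer argument carries the BTF "user" type tag (i.e. is declared __user).

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical example only: "foo" and "struct foo_conf" stand in for
 * any kernel function whose BTF marks a pointer argument with the
 * "user" type tag.
 */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

struct foo_conf {
	int val;
};

int value;

SEC("fentry/foo")			/* hypothetical attach point */
int BPF_PROG(read_user_arg, struct foo_conf *conf)
{
	int v;

	/* With this change, a direct dereference of the __user tagged
	 * pointer, e.g.
	 *
	 *	value = conf->val;
	 *
	 * is rejected at load time with a message of the form
	 * "R1 is ptr_foo_conf access user memory: off=0".
	 *
	 * The accepted pattern is an explicit user-space copy:
	 */
	if (bpf_probe_read_user(&v, sizeof(v), &conf->val))
		return 0;

	value = v;
	return 0;
}

Loading the commented-out direct dereference instead would trip the check_ptr_to_btf_access() test added in the diff below.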
Parent: 7472d5a642
Commit: c6f1bfe89a
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -332,7 +332,10 @@ enum bpf_type_flag {
 	 */
 	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),
 
-	__BPF_TYPE_LAST_FLAG	= MEM_ALLOC,
+	/* MEM is in user address space. */
+	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),
+
+	__BPF_TYPE_LAST_FLAG	= MEM_USER,
 };
 
 /* Max number of base types. */
@@ -588,7 +591,7 @@ struct bpf_verifier_ops {
 				 const struct btf *btf,
 				 const struct btf_type *t, int off, int size,
 				 enum bpf_access_type atype,
-				 u32 *next_btf_id);
+				 u32 *next_btf_id, enum bpf_type_flag *flag);
 };
 
 struct bpf_prog_offload_ops {
@@ -1780,7 +1783,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, int off, int size,
 		      enum bpf_access_type atype,
-		      u32 *next_btf_id);
+		      u32 *next_btf_id, enum bpf_type_flag *flag);
 bool btf_struct_ids_match(struct bpf_verifier_log *log,
 			  const struct btf *btf, u32 id, int off,
 			  const struct btf *need_btf, u32 need_type_id);
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -238,6 +238,11 @@ static inline bool btf_type_is_var(const struct btf_type *t)
 	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
 }
 
+static inline bool btf_type_is_type_tag(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG;
+}
+
 /* union is only a special case of struct:
  * all its offsetof(member) == 0
  */
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4886,6 +4886,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 	const char *tname = prog->aux->attach_func_name;
 	struct bpf_verifier_log *log = info->log;
 	const struct btf_param *args;
+	const char *tag_value;
 	u32 nr_args, arg;
 	int i, ret;
 
@@ -5038,6 +5039,13 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 	info->btf = btf;
 	info->btf_id = t->type;
 	t = btf_type_by_id(btf, t->type);
+
+	if (btf_type_is_type_tag(t)) {
+		tag_value = __btf_name_by_offset(btf, t->name_off);
+		if (strcmp(tag_value, "user") == 0)
+			info->reg_type |= MEM_USER;
+	}
+
 	/* skip modifiers */
 	while (btf_type_is_modifier(t)) {
 		info->btf_id = t->type;
@@ -5064,12 +5072,12 @@ enum bpf_struct_walk_result {
 
 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 			   const struct btf_type *t, int off, int size,
-			   u32 *next_btf_id)
+			   u32 *next_btf_id, enum bpf_type_flag *flag)
 {
 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
 	const struct btf_type *mtype, *elem_type = NULL;
 	const struct btf_member *member;
-	const char *tname, *mname;
+	const char *tname, *mname, *tag_value;
 	u32 vlen, elem_id, mid;
 
 again:
@@ -5253,7 +5261,8 @@ error:
 		}
 
 		if (btf_type_is_ptr(mtype)) {
-			const struct btf_type *stype;
+			const struct btf_type *stype, *t;
+			enum bpf_type_flag tmp_flag = 0;
 			u32 id;
 
 			if (msize != size || off != moff) {
@@ -5262,9 +5271,19 @@ error:
 					mname, moff, tname, off, size);
 				return -EACCES;
 			}
+
+			/* check __user tag */
+			t = btf_type_by_id(btf, mtype->type);
+			if (btf_type_is_type_tag(t)) {
+				tag_value = __btf_name_by_offset(btf, t->name_off);
+				if (strcmp(tag_value, "user") == 0)
+					tmp_flag = MEM_USER;
+			}
+
 			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
 			if (btf_type_is_struct(stype)) {
 				*next_btf_id = id;
+				*flag = tmp_flag;
 				return WALK_PTR;
 			}
 		}
@@ -5291,13 +5310,14 @@ error:
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, int off, int size,
 		      enum bpf_access_type atype __maybe_unused,
-		      u32 *next_btf_id)
+		      u32 *next_btf_id, enum bpf_type_flag *flag)
 {
+	enum bpf_type_flag tmp_flag = 0;
 	int err;
 	u32 id;
 
 	do {
-		err = btf_struct_walk(log, btf, t, off, size, &id);
+		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
 
 		switch (err) {
 		case WALK_PTR:
@@ -5305,6 +5325,7 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 			 * we're done.
 			 */
 			*next_btf_id = id;
+			*flag = tmp_flag;
 			return PTR_TO_BTF_ID;
 		case WALK_SCALAR:
 			return SCALAR_VALUE;
@@ -5349,6 +5370,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
 			  const struct btf *need_btf, u32 need_type_id)
 {
 	const struct btf_type *type;
+	enum bpf_type_flag flag;
 	int err;
 
 	/* Are we already done? */
@@ -5359,7 +5381,7 @@ again:
 	type = btf_type_by_id(btf, id);
 	if (!type)
 		return false;
-	err = btf_struct_walk(log, btf, type, off, 1, &id);
+	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
 	if (err != WALK_STRUCT)
 		return false;
 
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -536,7 +536,7 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
 static const char *reg_type_str(struct bpf_verifier_env *env,
 				enum bpf_reg_type type)
 {
-	char postfix[16] = {0}, prefix[16] = {0};
+	char postfix[16] = {0}, prefix[32] = {0};
 	static const char * const str[] = {
 		[NOT_INIT]		= "?",
 		[SCALAR_VALUE]		= "inv",
@@ -570,9 +570,11 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
 	}
 
 	if (type & MEM_RDONLY)
-		strncpy(prefix, "rdonly_", 16);
+		strncpy(prefix, "rdonly_", 32);
 	if (type & MEM_ALLOC)
-		strncpy(prefix, "alloc_", 16);
+		strncpy(prefix, "alloc_", 32);
+	if (type & MEM_USER)
+		strncpy(prefix, "user_", 32);
 
 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
 		 prefix, str[base_type(type)], postfix);
@@ -1547,14 +1549,15 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
 			    struct bpf_reg_state *regs, u32 regno,
 			    enum bpf_reg_type reg_type,
-			    struct btf *btf, u32 btf_id)
+			    struct btf *btf, u32 btf_id,
+			    enum bpf_type_flag flag)
 {
 	if (reg_type == SCALAR_VALUE) {
 		mark_reg_unknown(env, regs, regno);
 		return;
 	}
 	mark_reg_known_zero(env, regs, regno);
-	regs[regno].type = PTR_TO_BTF_ID;
+	regs[regno].type = PTR_TO_BTF_ID | flag;
 	regs[regno].btf = btf;
 	regs[regno].btf_id = btf_id;
 }
@@ -4152,6 +4155,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg = regs + regno;
 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
+	enum bpf_type_flag flag = 0;
 	u32 btf_id;
 	int ret;
 
@@ -4171,9 +4175,16 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	if (reg->type & MEM_USER) {
+		verbose(env,
+			"R%d is ptr_%s access user memory: off=%d\n",
+			regno, tname, off);
+		return -EACCES;
+	}
+
 	if (env->ops->btf_struct_access) {
 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
-						  off, size, atype, &btf_id);
+						  off, size, atype, &btf_id, &flag);
 	} else {
 		if (atype != BPF_READ) {
 			verbose(env, "only read is supported\n");
@@ -4181,14 +4192,14 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		}
 
 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
-					atype, &btf_id);
+					atype, &btf_id, &flag);
 	}
 
 	if (ret < 0)
 		return ret;
 
 	if (atype == BPF_READ && value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id);
+		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
 
 	return 0;
 }
@@ -4201,6 +4212,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 {
 	struct bpf_reg_state *reg = regs + regno;
 	struct bpf_map *map = reg->map_ptr;
+	enum bpf_type_flag flag = 0;
 	const struct btf_type *t;
 	const char *tname;
 	u32 btf_id;
@@ -4238,12 +4250,12 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
-	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
+	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
 	if (ret < 0)
 		return ret;
 
 	if (value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
+		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
 
 	return 0;
 }
@@ -4444,7 +4456,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		if (err < 0)
 			return err;
 
-		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
+		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
+				       &btf_id);
 		if (err)
 			verbose_linfo(env, insn_idx, "; ");
 		if (!err && t == BPF_READ && value_regno >= 0) {
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -145,7 +145,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 					   const struct btf *btf,
 					   const struct btf_type *t, int off,
 					   int size, enum bpf_access_type atype,
-					   u32 *next_btf_id)
+					   u32 *next_btf_id,
+					   enum bpf_type_flag *flag)
 {
 	const struct btf_type *state;
 	s32 type_id;
@@ -162,7 +163,8 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 		return -EACCES;
 	}
 
-	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
+	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+				flag);
 	if (err < 0)
 		return err;
 
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -96,12 +96,14 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
 					const struct btf *btf,
 					const struct btf_type *t, int off,
 					int size, enum bpf_access_type atype,
-					u32 *next_btf_id)
+					u32 *next_btf_id,
+					enum bpf_type_flag *flag)
 {
 	size_t end;
 
 	if (atype == BPF_READ)
-		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
+		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
+					 flag);
 
 	if (t != tcp_sock_type) {
 		bpf_log(log, "only read is supported\n");
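For reference, a sketch of how the "user" type tag that btf_type_is_type_tag()/strcmp(tag_value, "user") looks for ends up in kernel BTF. The parent commit (7472d5a642) makes __user expand to a btf_type_tag attribute when the toolchain supports it; the exact config and compiler guards live in include/linux/compiler_types.h, so treat the preprocessor check below as an approximation, and the foo_* declarations as hypothetical.

/* Approximation of the __user definition introduced by the parent
 * commit; the in-tree version carries additional config/compiler
 * guards.
 */
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif

#if __has_attribute(btf_type_tag)
# define __user	__attribute__((btf_type_tag("user")))
#else
# define __user
#endif

struct foo_conf;	/* hypothetical */

/* Tagged function argument: detected by btf_ctx_access() above and
 * propagated as info->reg_type |= MEM_USER.
 */
long foo_ioctl(struct foo_conf __user *arg);

/* Tagged struct member: detected by btf_struct_walk() above and
 * propagated as PTR_TO_BTF_ID | MEM_USER on the loaded pointer.
 */
struct foo_desc {
	struct foo_conf __user *uconf;
};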