bpf: Support readonly/readwrite buffers in verifier
Readonly and read/write buffer register states are introduced. In total, four states are supported: PTR_TO_RDONLY_BUF[_OR_NULL] and PTR_TO_RDWR_BUF[_OR_NULL]. As their names suggest, PTR_TO_RDONLY_BUF[_OR_NULL] is for readonly buffers and PTR_TO_RDWR_BUF[_OR_NULL] is for read/write buffers. These new register states will be used by the later bpf map element iterator.

The new register states are similar to PTR_TO_TP_BUFFER in that the verifier computes the maximum accessed buffer size at verification time. That accessed buffer size will then be compared against other metrics at attach/link_create time.

Similar to the PTR_TO_BTF_ID_OR_NULL reg_state in bpf iterator programs, the PTR_TO_RDONLY_BUF_OR_NULL and PTR_TO_RDWR_BUF_OR_NULL reg_types can be set in prog->aux->bpf_ctx_arg_aux, and the verifier retrieves them during btf_ctx_access(). The later bpf map element iterator implementation will show how this information is assigned at target registration time.

The verifier is also enhanced so that PTR_TO_RDONLY_BUF can be passed to an ARG_PTR_TO_MEM[_OR_NULL] helper argument, and PTR_TO_RDWR_BUF can be passed to ARG_PTR_TO_MEM[_OR_NULL] or ARG_PTR_TO_UNINIT_MEM.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200723184111.590274-1-yhs@fb.com
Parent: f9c7927295
Commit: afbf21dce6
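To make the prog->aux->bpf_ctx_arg_aux mechanism above concrete, below is a minimal sketch of how an iterator target could advertise these reg_types to the verifier. The bpf_map_elem target name and the struct bpf_iter__bpf_map_elem context layout are assumptions for illustration only; the actual map element iterator registration lands in a later patch of this series.

/* Hypothetical target registration: the iterator context's key pointer
 * is declared a readonly buffer and the value pointer a read/write
 * buffer, both possibly NULL.  btf_ctx_access() (see the btf.c hunk
 * below) matches the accessed ctx offset against ctx_arg_info[] and
 * hands the stored reg_type to the verifier.
 */
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target			= "bpf_map_elem",
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
};

A target would hand such a struct to bpf_iter_reg_target(); the per-program max_rdonly_access/max_rdwr_access sizes recorded by the verifier can then be compared against the real key/value sizes at attach/link_create time, as described in the commit message.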
@@ -353,6 +353,10 @@ enum bpf_reg_type {
 	PTR_TO_BTF_ID_OR_NULL,	 /* reg points to kernel struct or NULL */
 	PTR_TO_MEM,		 /* reg points to valid memory region */
 	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
+	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
+	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
+	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
+	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
 };

 /* The information passed from prog-specific *_is_valid_access
@@ -694,6 +698,8 @@ struct bpf_prog_aux {
 	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
 	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
 	u32 ctx_arg_info_size;
+	u32 max_rdonly_access;
+	u32 max_rdwr_access;
 	const struct bpf_ctx_arg_aux *ctx_arg_info;
 	struct bpf_prog *linked_prog;
 	bool verifier_zext; /* Zero extensions has been inserted by verifier. */
@@ -3806,6 +3806,19 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 			btf_kind_str[BTF_INFO_KIND(t->info)]);
 		return false;
 	}
+
+	/* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
+	for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
+		const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
+
+		if (ctx_arg_info->offset == off &&
+		    (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL ||
+		     ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) {
+			info->reg_type = ctx_arg_info->reg_type;
+			return true;
+		}
+	}
+
 	if (t->type == 0)
 		/* This is a pointer to void.
 		 * It is the same as scalar from the verifier safety pov.
@@ -409,7 +409,9 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
 	       type == PTR_TO_SOCK_COMMON_OR_NULL ||
 	       type == PTR_TO_TCP_SOCK_OR_NULL ||
 	       type == PTR_TO_BTF_ID_OR_NULL ||
-	       type == PTR_TO_MEM_OR_NULL;
+	       type == PTR_TO_MEM_OR_NULL ||
+	       type == PTR_TO_RDONLY_BUF_OR_NULL ||
+	       type == PTR_TO_RDWR_BUF_OR_NULL;
 }

 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
@@ -503,6 +505,10 @@ static const char * const reg_type_str[] = {
 	[PTR_TO_BTF_ID_OR_NULL]	= "ptr_or_null_",
 	[PTR_TO_MEM]		= "mem",
 	[PTR_TO_MEM_OR_NULL]	= "mem_or_null",
+	[PTR_TO_RDONLY_BUF]	= "rdonly_buf",
+	[PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null",
+	[PTR_TO_RDWR_BUF]	= "rdwr_buf",
+	[PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null",
 };

 static char slot_type_char[] = {
@@ -2173,6 +2179,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case PTR_TO_XDP_SOCK:
 	case PTR_TO_BTF_ID:
 	case PTR_TO_BTF_ID_OR_NULL:
+	case PTR_TO_RDONLY_BUF:
+	case PTR_TO_RDONLY_BUF_OR_NULL:
+	case PTR_TO_RDWR_BUF:
+	case PTR_TO_RDWR_BUF_OR_NULL:
 		return true;
 	default:
 		return false;
@@ -3052,14 +3062,15 @@ int check_ctx_reg(struct bpf_verifier_env *env,
 	return 0;
 }

-static int check_tp_buffer_access(struct bpf_verifier_env *env,
-				  const struct bpf_reg_state *reg,
-				  int regno, int off, int size)
+static int __check_buffer_access(struct bpf_verifier_env *env,
+				 const char *buf_info,
+				 const struct bpf_reg_state *reg,
+				 int regno, int off, int size)
 {
 	if (off < 0) {
 		verbose(env,
-			"R%d invalid tracepoint buffer access: off=%d, size=%d",
-			regno, off, size);
+			"R%d invalid %s buffer access: off=%d, size=%d",
+			regno, buf_info, off, size);
 		return -EACCES;
 	}
 	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
@@ -3071,12 +3082,45 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env,
 			regno, off, tn_buf);
 		return -EACCES;
 	}
+
+	return 0;
+}
+
+static int check_tp_buffer_access(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  int regno, int off, int size)
+{
+	int err;
+
+	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
+	if (err)
+		return err;
+
 	if (off + size > env->prog->aux->max_tp_access)
 		env->prog->aux->max_tp_access = off + size;

 	return 0;
 }

+static int check_buffer_access(struct bpf_verifier_env *env,
+			       const struct bpf_reg_state *reg,
+			       int regno, int off, int size,
+			       bool zero_size_allowed,
+			       const char *buf_info,
+			       u32 *max_access)
+{
+	int err;
+
+	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
+	if (err)
+		return err;
+
+	if (off + size > *max_access)
+		*max_access = off + size;
+
+	return 0;
+}
+
 /* BPF architecture zero extends alu32 ops into 64-bit registesr */
 static void zext_32_to_64(struct bpf_reg_state *reg)
 {
@@ -3427,6 +3471,23 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	} else if (reg->type == CONST_PTR_TO_MAP) {
 		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
 					      value_regno);
+	} else if (reg->type == PTR_TO_RDONLY_BUF) {
+		if (t == BPF_WRITE) {
+			verbose(env, "R%d cannot write into %s\n",
+				regno, reg_type_str[reg->type]);
+			return -EACCES;
+		}
+		err = check_buffer_access(env, reg, regno, off, size, false,
+					  "rdonly",
+					  &env->prog->aux->max_rdonly_access);
+		if (!err && value_regno >= 0)
+			mark_reg_unknown(env, regs, value_regno);
+	} else if (reg->type == PTR_TO_RDWR_BUF) {
+		err = check_buffer_access(env, reg, regno, off, size, false,
+					  "rdwr",
+					  &env->prog->aux->max_rdwr_access);
+		if (!err && t == BPF_READ && value_regno >= 0)
+			mark_reg_unknown(env, regs, value_regno);
 	} else {
 		verbose(env, "R%d invalid mem access '%s'\n", regno,
 			reg_type_str[reg->type]);
@@ -3668,6 +3729,18 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		return check_mem_region_access(env, regno, reg->off,
 					       access_size, reg->mem_size,
 					       zero_size_allowed);
+	case PTR_TO_RDONLY_BUF:
+		if (meta && meta->raw_mode)
+			return -EACCES;
+		return check_buffer_access(env, reg, regno, reg->off,
+					   access_size, zero_size_allowed,
+					   "rdonly",
+					   &env->prog->aux->max_rdonly_access);
+	case PTR_TO_RDWR_BUF:
+		return check_buffer_access(env, reg, regno, reg->off,
+					   access_size, zero_size_allowed,
+					   "rdwr",
+					   &env->prog->aux->max_rdwr_access);
 	default: /* scalar_value|ptr_to_stack or invalid ptr */
 		return check_stack_boundary(env, regno, access_size,
 					    zero_size_allowed, meta);
@@ -3933,6 +4006,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		else if (!type_is_pkt_pointer(type) &&
 			 type != PTR_TO_MAP_VALUE &&
 			 type != PTR_TO_MEM &&
+			 type != PTR_TO_RDONLY_BUF &&
+			 type != PTR_TO_RDWR_BUF &&
 			 type != expected_type)
 			goto err_type;
 		meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
@@ -6806,6 +6881,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
 			reg->type = PTR_TO_BTF_ID;
 		} else if (reg->type == PTR_TO_MEM_OR_NULL) {
 			reg->type = PTR_TO_MEM;
+		} else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) {
+			reg->type = PTR_TO_RDONLY_BUF;
+		} else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) {
+			reg->type = PTR_TO_RDWR_BUF;
 		}
 		if (is_null) {
 			/* We don't need id and ref_obj_id from this point