Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2018-01-18

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a divide by zero due to wrong if (src_reg == 0) check in
   64-bit mode. Properly handle this in interpreter and mask it
   also generically in verifier to guard against similar checks
   in JITs, from Eric and Alexei.

2) Fix a bug in arm64 JIT when tail calls are involved and progs
   have different stack sizes, from Daniel.

3) Reject stores into BPF context that are not expected BPF_STX |
   BPF_MEM variant, from Daniel.

4) Mark dst reg as unknown on {s,u}bounds adjustments when the src
   reg has derived bounds from dead branches, from Daniel.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7155f8f391
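For reference on point 1), a minimal stand-alone sketch (plain user-space C, not kernel code; the helper name is made up) of why the zero check for the 32-bit BPF_DIV/BPF_MOD case has to look at the truncated divisor: BPF registers are 64 bits wide, but the BPF_ALU (32-bit) forms divide only by the low half of the source register, as the interpreter hunks below enforce.

  /* Sketch: 32-bit div uses only the low 32 bits of the divisor, so
   * checking "src == 0" on the full 64-bit value is not enough.
   */
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t alu32_div_x(uint64_t dst, uint64_t src)
  {
          if ((uint32_t)src == 0)
                  return 0;       /* the fixed interpreter aborts the program here */
          return (uint32_t)dst / (uint32_t)src;
  }

  int main(void)
  {
          /* divisor has upper bits set but a zero low half; without the
           * (u32) cast in the check this would divide by zero.
           */
          printf("%llu\n",
                 (unsigned long long)alu32_div_x(10, 0x100000000ULL));
          return 0;
  }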
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -148,7 +148,8 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
 /* Stack must be multiples of 16B */
 #define STACK_ALIGN(sz) (((sz) + 15) & ~15)
 
-#define PROLOGUE_OFFSET 8
+/* Tail call offset to jump into */
+#define PROLOGUE_OFFSET 7
 
 static int build_prologue(struct jit_ctx *ctx)
 {
@@ -200,19 +201,19 @@ static int build_prologue(struct jit_ctx *ctx)
 	/* Initialize tail_call_cnt */
 	emit(A64_MOVZ(1, tcc, 0, 0), ctx);
 
-	/* 4 byte extra for skb_copy_bits buffer */
-	ctx->stack_size = prog->aux->stack_depth + 4;
-	ctx->stack_size = STACK_ALIGN(ctx->stack_size);
-
-	/* Set up function call stack */
-	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
-
 	cur_offset = ctx->idx - idx0;
 	if (cur_offset != PROLOGUE_OFFSET) {
 		pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
 			    cur_offset, PROLOGUE_OFFSET);
 		return -1;
 	}
+
+	/* 4 byte extra for skb_copy_bits buffer */
+	ctx->stack_size = prog->aux->stack_depth + 4;
+	ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
+	/* Set up function call stack */
+	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 	return 0;
 }
 
@@ -260,11 +261,12 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	emit(A64_LDR64(prg, tmp, prg), ctx);
 	emit(A64_CBZ(1, prg, jmp_offset), ctx);
 
-	/* goto *(prog->bpf_func + prologue_size); */
+	/* goto *(prog->bpf_func + prologue_offset); */
 	off = offsetof(struct bpf_prog, bpf_func);
 	emit_a64_mov_i64(tmp, off, ctx);
 	emit(A64_LDR64(tmp, prg, tmp), ctx);
 	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
+	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
 	emit(A64_BR(tmp), ctx);
 
 	/* out: */
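The arm64 hunks above make each program set up its stack only after the prologue-length check and unwind it again (the added A64_ADD_I on A64_SP) before jumping past the next program's prologue, since stack depths can now differ between tail-call source and target. As a small aside, a stand-alone check of the 16-byte rounding applied to stack_depth + 4 (same expression as the STACK_ALIGN macro in the hunk; nothing else is assumed):

  #include <assert.h>

  #define STACK_ALIGN(sz) (((sz) + 15) & ~15)

  int main(void)
  {
          assert(STACK_ALIGN(0)  == 0);
          assert(STACK_ALIGN(1)  == 16);
          /* e.g. stack_depth 28 plus the 4 spare bytes -> 32 */
          assert(STACK_ALIGN(28 + 4) == 32);
          return 0;
  }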
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -956,7 +956,7 @@ select_insn:
 		DST = tmp;
 		CONT;
 	ALU_MOD_X:
-		if (unlikely(SRC == 0))
+		if (unlikely((u32)SRC == 0))
 			return 0;
 		tmp = (u32) DST;
 		DST = do_div(tmp, (u32) SRC);
@@ -975,7 +975,7 @@ select_insn:
 		DST = div64_u64(DST, SRC);
 		CONT;
 	ALU_DIV_X:
-		if (unlikely(SRC == 0))
+		if (unlikely((u32)SRC == 0))
 			return 0;
 		tmp = (u32) DST;
 		do_div(tmp, (u32) SRC);
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -978,6 +978,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
 	return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno);
 }
 
+static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return reg->type == PTR_TO_CTX;
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1258,6 +1265,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}
 
+	if (is_ctx_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
+			insn->dst_reg);
+		return -EACCES;
+	}
+
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_READ, -1);
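The check_xadd hunk above, together with the do_check hunk further down, enforces the rule from point 3): when the destination register holds a context pointer, only the plain BPF_STX | BPF_MEM store is passed on to the context write checks, while BPF_ST and BPF_XADD are refused. A rough stand-alone model of that decision (the enum and function names here are illustrative, not the verifier's):

  #include <stdbool.h>
  #include <stdio.h>

  enum reg_type { SCALAR_VALUE, PTR_TO_CTX, PTR_TO_MAP_VALUE };
  enum store_kind { STORE_STX_MEM, STORE_ST_MEM, STORE_STX_XADD };

  /* Mirrors the intent of the added checks: a ctx destination only
   * accepts the plain BPF_STX | BPF_MEM form.
   */
  static bool store_allowed(enum reg_type dst, enum store_kind kind)
  {
          if (dst == PTR_TO_CTX && kind != STORE_STX_MEM)
                  return false;
          return true;
  }

  int main(void)
  {
          printf("ST into ctx:   %d\n", store_allowed(PTR_TO_CTX, STORE_ST_MEM));    /* 0 */
          printf("XADD into ctx: %d\n", store_allowed(PTR_TO_CTX, STORE_STX_XADD));  /* 0 */
          printf("STX into ctx:  %d\n", store_allowed(PTR_TO_CTX, STORE_STX_MEM));   /* 1 */
          return 0;
  }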
@@ -1882,17 +1895,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
 	dst_reg = &regs[dst];
 
-	if (WARN_ON_ONCE(known && (smin_val != smax_val))) {
-		print_verifier_state(env, env->cur_state);
-		verbose(env,
-			"verifier internal error: known but bad sbounds\n");
-		return -EINVAL;
-	}
-	if (WARN_ON_ONCE(known && (umin_val != umax_val))) {
-		print_verifier_state(env, env->cur_state);
-		verbose(env,
-			"verifier internal error: known but bad ubounds\n");
-		return -EINVAL;
+	if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
+	    smin_val > smax_val || umin_val > umax_val) {
+		/* Taint dst register if offset had invalid bounds derived from
+		 * e.g. dead branches.
+		 */
+		__mark_reg_unknown(dst_reg);
+		return 0;
 	}
 
 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
@@ -2084,6 +2093,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
+	    smin_val > smax_val || umin_val > umax_val) {
+		/* Taint dst register if offset had invalid bounds derived from
+		 * e.g. dead branches.
+		 */
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	if (!src_known &&
 	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
 		__mark_reg_unknown(dst_reg);
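The condition added in the two adjust_*_min_max_vals hunks treats contradictory {s,u}bounds on a supposedly known value as a sign that the bounds were derived on a dead branch, and taints the destination rather than trusting them. A stand-alone model of just that predicate (struct and field names are illustrative):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  struct bounds {
          int64_t  smin, smax;
          uint64_t umin, umax;
          bool     known;         /* value is a known constant */
  };

  /* Same shape as the added verifier check: a "known" value whose bounds
   * disagree, or bounds that are inverted, can only come from an
   * unreachable (dead) path, so the result must not be trusted.
   */
  static bool taint_dst(const struct bounds *b)
  {
          return (b->known && (b->smin != b->smax || b->umin != b->umax)) ||
                 b->smin > b->smax || b->umin > b->umax;
  }

  int main(void)
  {
          /* inverted signed bounds, e.g. the fall-through path of a
           * branch that can never be false
           */
          struct bounds dead = {
                  .smin = 1, .smax = 0,
                  .umin = 1, .umax = 1,
                  .known = true,
          };

          printf("mark unknown: %d\n", taint_dst(&dead));     /* 1 */
          return 0;
  }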
@@ -3993,6 +4011,12 @@ static int do_check(struct bpf_verifier_env *env)
 			if (err)
 				return err;
 
+			if (is_ctx_reg(env, insn->dst_reg)) {
+				verbose(env, "BPF_ST stores into R%d context is not allowed\n",
+					insn->dst_reg);
+				return -EACCES;
+			}
+
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
@@ -4445,6 +4469,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 	int i, cnt, delta = 0;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
+		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
+			/* due to JIT bugs clear upper 32-bits of src register
+			 * before div/mod operation
+			 */
+			insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
+			insn_buf[1] = *insn;
+			cnt = 2;
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
 
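The fixup_bpf_calls hunk above patches every 32-bit DIV_X/MOD_X with a preceding BPF_MOV32_REG(src, src); a 32-bit move of a register onto itself zero-extends it, so whatever code a JIT emits for the division only ever sees a clean 32-bit divisor. A hedged sketch of that effect in plain C (not JIT output):

  #include <stdint.h>
  #include <stdio.h>

  /* BPF_MOV32_REG(r, r): 32-bit ALU ops write the low half and clear the
   * upper 32 bits of the destination register.
   */
  static uint64_t mov32_self(uint64_t r)
  {
          return (uint32_t)r;
  }

  int main(void)
  {
          uint64_t src = 0xdeadbeef00000000ULL;   /* low 32 bits are zero */

          src = mov32_self(src);                  /* the patched-in insn */
          printf("divisor seen by div insn: %llu\n",
                 (unsigned long long)src);        /* 0 -> the zero check hits */
          return 0;
  }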
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -458,6 +458,10 @@ do_pass:
 			    convert_bpf_extensions(fp, &insn))
 				break;
 
+			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
+			    fp->code == (BPF_ALU | BPF_MOD | BPF_X))
+				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
+
 			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
 			break;
 
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2592,6 +2592,29 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"context stores via ST",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "BPF_ST stores into R1 context is not allowed",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
+	{
+		"context stores via XADD",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
+				     BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr = "BPF_XADD stores into R1 context is not allowed",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 	{
 		"direct packet access: test1",
 		.insns = {
@@ -4312,7 +4335,8 @@
 		.fixup_map1 = { 2 },
 		.errstr_unpriv = "R2 leaks addr into mem",
 		.result_unpriv = REJECT,
-		.result = ACCEPT,
+		.result = REJECT,
+		.errstr = "BPF_XADD stores into R1 context is not allowed",
 	},
 	{
 		"leak pointer into ctx 2",
@@ -4326,7 +4350,8 @@
 		},
 		.errstr_unpriv = "R10 leaks addr into mem",
 		.result_unpriv = REJECT,
-		.result = ACCEPT,
+		.result = REJECT,
+		.errstr = "BPF_XADD stores into R1 context is not allowed",
 	},
 	{
 		"leak pointer into ctx 3",
@@ -6707,7 +6732,7 @@
 		BPF_JMP_IMM(BPF_JA, 0, 0, -7),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "unbounded min value",
+		.errstr = "R0 invalid mem access 'inv'",
 		.result = REJECT,
 	},
 	{
@@ -8608,6 +8633,127 @@
 		.prog_type = BPF_PROG_TYPE_XDP,
 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
+	{
+		"check deducing bounds from const, 1",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 tried to subtract pointer from scalar",
+	},
+	{
+		"check deducing bounds from const, 2",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"check deducing bounds from const, 3",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 tried to subtract pointer from scalar",
+	},
+	{
+		"check deducing bounds from const, 4",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"check deducing bounds from const, 5",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 tried to subtract pointer from scalar",
+	},
+	{
+		"check deducing bounds from const, 6",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 tried to subtract pointer from scalar",
+	},
+	{
+		"check deducing bounds from const, 7",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, ~0),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "dereference of modified ctx ptr",
+	},
+	{
+		"check deducing bounds from const, 8",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, ~0),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "dereference of modified ctx ptr",
+	},
+	{
+		"check deducing bounds from const, 9",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "R0 tried to subtract pointer from scalar",
+	},
+	{
+		"check deducing bounds from const, 10",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
+			/* Marks reg as unknown. */
+			BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+	},
 	{
 		"bpf_exit with invalid return code. test1",
 		.insns = {