Merge branch 'dead-code-elimination'

Jakub Kicinski says:

====================
This set adds support for complete removal of dead code.

Patch 3 contains all the code removal logic; patches 2 and 4
additionally optimize branches around and to dead code.

Patches 6 and 7 allow offload JITs to take advantage of the
optimization.  After a few small cleanups (8, 9, 10), nfp
support is added (11, 12).

Removing code directly in the verifier makes it easy to adjust
the relevant metadata (line info, subprogram info).  JITs for
code-store-constrained architectures would have a hard time
performing such adjustments at the JIT level.  Removing subprograms
or line info is very hard once BPF core has finished verification.
For user space to perform dead code removal, it would have to
perform execution simulation/analysis similar to what the verifier
does.

v3:
 - fix uninitialized var warning in GCC 6 (buildbot).
v4:
 - simplify the linfo-keeping logic (Yonghong).  Instead of
   trying to figure out that we are removing the first instruction
   of a subprogram, just always keep the last dead line info if the
   first live instruction doesn't have one.
v5:
 - improve comments (Martin Lau).
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Alexei Starovoitov 2019-01-23 17:35:33 -08:00
Parents bbebce8eb9 9a06927e77
Commit 923cefe3f9
12 changed files with 1004 additions and 67 deletions
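As context for the per-file diffs below: the new rewrite passes sit at the end of bpf_check(), after check_max_stack_depth(), and full removal is only performed for privileged (CAP_SYS_ADMIN) loaders. A condensed sketch of the resulting ordering, mirroring the kernel/bpf/verifier.c hunk further down (error handling elided, not verbatim kernel code):

if (is_priv) {
	if (ret == 0)
		/* rewrite dead conditional jumps to unconditional JA */
		opt_hard_wire_dead_code_branches(env);
	if (ret == 0)
		/* drop unreachable instructions, fix up branches/linfo */
		ret = opt_remove_dead_code(env);
	if (ret == 0)
		/* drop the JA +0 no-ops left behind */
		ret = opt_remove_nops(env);
} else {
	if (ret == 0)
		/* unprivileged: overwrite dead insns in place as before */
		sanitize_dead_code(env);
}

Each removal funnels through verifier_remove_insns(), which notifies a bound offload device via bpf_prog_offload_remove_insns() before adjusting subprog starts, line info and insn_aux_data.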

View file

@@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u64 imm = insn->imm; /* sign extend */
if (skip) {
meta->skip = true;
meta->flags |= FLAG_INSN_SKIP_NOOP;
return 0;
}
@@ -1296,7 +1296,7 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
if (skip) {
meta->skip = true;
meta->flags |= FLAG_INSN_SKIP_NOOP;
return 0;
}
@@ -3182,7 +3182,7 @@ bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
} else {
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
offset_br = nfp_prog_current_offset(nfp_prog);
}
wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
@@ -3395,7 +3395,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
int err;
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
@@ -3439,7 +3439,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
jmp_dst = meta->jmp_dst;
if (jmp_dst->skip) {
if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
pr_err("Branch landing on removed instruction!!\n");
return -ELOOP;
}
@@ -3689,7 +3689,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
return nfp_prog->error;
}
if (meta->skip) {
if (meta->flags & FLAG_INSN_SKIP_MASK) {
nfp_prog->n_translated++;
continue;
}
@@ -3737,10 +3737,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
/* Programs start with R6 = R1 but we ignore the skb pointer */
if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
insn.src_reg == 1 && insn.dst_reg == 6)
meta->skip = true;
meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
/* Return as soon as something doesn't match */
if (!meta->skip)
if (!(meta->flags & FLAG_INSN_SKIP_MASK))
return;
}
}
@@ -3755,7 +3755,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
list_for_each_entry(meta, &nfp_prog->insns, l) {
struct bpf_insn insn = meta->insn;
if (meta->skip)
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (BPF_CLASS(insn.code) != BPF_ALU &&
@@ -3829,7 +3829,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
continue;
meta2->skip = true;
meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -3869,8 +3869,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
meta3->flags & FLAG_INSN_IS_JUMP_DST)
continue;
meta2->skip = true;
meta3->skip = true;
meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
}
@@ -4065,7 +4065,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
}
head_ld_meta->paired_st = &head_st_meta->insn;
head_st_meta->skip = true;
head_st_meta->flags |=
FLAG_INSN_SKIP_PREC_DEPENDENT;
} else {
head_ld_meta->ldst_gather_len = 0;
}
@@ -4098,8 +4099,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
head_ld_meta = meta1;
head_st_meta = meta2;
} else {
meta1->skip = true;
meta2->skip = true;
meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
}
head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
@@ -4124,7 +4125,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
if (meta->flags & FLAG_INSN_IS_JUMP_DST)
cache_avail = false;
if (meta->skip)
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
insn = &meta->insn;
@@ -4210,7 +4211,7 @@ start_new:
}
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
if (meta->flags & FLAG_INSN_SKIP_MASK)
continue;
if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
@@ -4246,7 +4247,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
u32 id;
nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
if (meta1->skip || meta2->skip)
if (meta1->flags & FLAG_INSN_SKIP_MASK ||
meta2->flags & FLAG_INSN_SKIP_MASK)
continue;
if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
@@ -4325,7 +4327,7 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog)
return ret;
}
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
@@ -4353,7 +4355,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
else
dst_idx = meta->n + 1 + meta->insn.off;
dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);
if (pseudo_call)
dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

View file

@@ -243,6 +243,16 @@ struct nfp_bpf_reg_state {
#define FLAG_INSN_IS_JUMP_DST BIT(0)
#define FLAG_INSN_IS_SUBPROG_START BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4)
/* Instruction is optimized by the verifier */
#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5)
#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \
FLAG_INSN_SKIP_PREC_DEPENDENT | \
FLAG_INSN_SKIP_VERIFIER_OPT)
/**
* struct nfp_insn_meta - BPF instruction wrapper
@@ -271,7 +281,6 @@ struct nfp_bpf_reg_state {
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
* @subprog_idx: index of subprogram to which the instruction belongs
* @skip: skip this instruction (optimized out)
* @double_cb: callback for second part of the instruction
* @l: link on nfp_prog->insns list
*/
@@ -319,7 +328,6 @@ struct nfp_insn_meta {
unsigned short n;
unsigned short flags;
unsigned short subprog_idx;
bool skip;
instr_cb_t double_cb;
struct list_head l;
@@ -407,6 +415,17 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}
static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
u8 op;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
return false;
op = BPF_OP(meta->insn.code);
return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}
static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
struct bpf_insn insn = meta->insn;
@@ -457,6 +476,7 @@ struct nfp_bpf_subprog_info {
* @subprog_cnt: number of sub-programs, including main function
* @map_records: the map record pointers from bpf->maps_neutral
* @subprog: pointer to an array of objects holding info about sub-programs
* @n_insns: number of instructions on @insns list
* @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
*/
struct nfp_prog {
@@ -489,6 +509,7 @@ struct nfp_prog {
struct nfp_bpf_neutral_map **map_records;
struct nfp_bpf_subprog_info *subprog;
unsigned int n_insns;
struct list_head insns;
};
@@ -505,7 +526,7 @@ struct nfp_bpf_vnic {
};
bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
@@ -513,6 +534,10 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);
int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
@@ -526,7 +551,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns);
unsigned int insn_idx);
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

View file

@@ -163,8 +163,9 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns);
}
nfp_prog->n_insns = cnt;
nfp_bpf_jit_prepare(nfp_prog, cnt);
nfp_bpf_jit_prepare(nfp_prog);
return 0;
}
@@ -219,6 +220,10 @@ static int nfp_bpf_translate(struct bpf_prog *prog)
unsigned int max_instr;
int err;
/* We depend on dead code elimination succeeding */
if (prog->aux->offload->opt_failed)
return -EINVAL;
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
@@ -591,6 +596,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
.insn_hook = nfp_verify_insn,
.finalize = nfp_bpf_finalize,
.replace_insn = nfp_bpf_opt_replace_insn,
.remove_insns = nfp_bpf_opt_remove_insns,
.prepare = nfp_bpf_verifier_prep,
.translate = nfp_bpf_translate,
.destroy = nfp_bpf_destroy,

View file

@@ -18,15 +18,15 @@
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
unsigned int insn_idx)
{
unsigned int forward, backward, i;
backward = meta->n - insn_idx;
forward = insn_idx - meta->n;
if (min(forward, backward) > n_insns - insn_idx - 1) {
backward = n_insns - insn_idx - 1;
if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
backward = nfp_prog->n_insns - insn_idx - 1;
meta = nfp_prog_last_meta(nfp_prog);
}
if (min(forward, backward) > insn_idx && backward > insn_idx) {
@@ -629,7 +629,7 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
nfp_prog->verifier_meta = meta;
if (!nfp_bpf_supported_opcode(meta->insn.code)) {
@@ -690,8 +690,7 @@ nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
return 0;
}
static unsigned int
nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
unsigned int max_depth = 0, depth = 0, frame = 0;
@@ -726,7 +725,7 @@ continue_subprog:
/* Find the callee and start processing it. */
meta = nfp_bpf_goto_meta(nfp_prog, meta,
meta->n + 1 + meta->insn.imm, cnt);
meta->n + 1 + meta->insn.imm);
idx = meta->subprog_idx;
frame++;
goto process_subprog;
@@ -778,8 +777,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
nn = netdev_priv(env->prog->aux->offload->netdev);
max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog,
env->prog->len);
nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
if (nfp_prog->stack_size > max_stack) {
pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
nfp_prog->stack_size, max_stack);
@@ -788,3 +786,61 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
nfp_prog->verifier_meta = meta;
/* conditional jump to jump conversion */
if (is_mbpf_cond_jump(meta) &&
insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
unsigned int tgt_off;
tgt_off = off + insn->off + 1;
if (!insn->off) {
meta->jmp_dst = list_next_entry(meta, l);
meta->jump_neg_op = false;
} else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
off, meta->jmp_dst->n,
aux_data[tgt_off].orig_idx);
return -EINVAL;
}
return 0;
}
pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
meta->insn.code, insn->code);
return -EINVAL;
}
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
unsigned int i;
meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
for (i = 0; i < cnt; i++) {
if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
return -EINVAL;
/* doesn't count if it already has the flag */
if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
i--;
meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
meta = list_next_entry(meta, l);
}
return 0;
}

View file

@@ -268,9 +268,15 @@ struct bpf_verifier_ops {
};
struct bpf_prog_offload_ops {
/* verifier basic callbacks */
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
/* verifier optimization callbacks (called after .finalize) */
int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn);
int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
/* program management callbacks */
int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
@@ -283,6 +289,7 @@ struct bpf_prog_offload {
void *dev_priv;
struct list_head offloads;
bool dev_state;
bool opt_failed;
void *jited_image;
u32 jited_len;
};

View file

@@ -187,6 +187,7 @@ struct bpf_insn_aux_data {
int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
u8 alu_state; /* used in combination with alu_limit */
unsigned int orig_idx; /* original instruction index */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -265,5 +266,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
#endif /* _LINUX_BPF_VERIFIER_H */

View file

@@ -778,6 +778,7 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
void bpf_clear_redirect_map(struct bpf_map *map);

View file

@@ -307,15 +307,16 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
return 0;
}
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
u32 curr, const bool probe_pass)
static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, u32 curr, const bool probe_pass)
{
const s64 imm_min = S32_MIN, imm_max = S32_MAX;
s32 delta = end_new - end_old;
s64 imm = insn->imm;
if (curr < pos && curr + imm + 1 > pos)
if (curr < pos && curr + imm + 1 >= end_old)
imm += delta;
else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
else if (curr >= end_new && curr + imm + 1 < end_new)
imm -= delta;
if (imm < imm_min || imm > imm_max)
return -ERANGE;
@@ -324,15 +325,16 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
u32 curr, const bool probe_pass)
static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
s32 end_new, u32 curr, const bool probe_pass)
{
const s32 off_min = S16_MIN, off_max = S16_MAX;
s32 delta = end_new - end_old;
s32 off = insn->off;
if (curr < pos && curr + off + 1 > pos)
if (curr < pos && curr + off + 1 >= end_old)
off += delta;
else if (curr > pos + delta && curr + off + 1 <= pos + delta)
else if (curr >= end_new && curr + off + 1 < end_new)
off -= delta;
if (off < off_min || off > off_max)
return -ERANGE;
@@ -341,10 +343,10 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
return 0;
}
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
const bool probe_pass)
static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
s32 end_new, const bool probe_pass)
{
u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
struct bpf_insn *insn = prog->insnsi;
int ret = 0;
@@ -356,8 +358,8 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
* do any other adjustments. Therefore skip the patchlet.
*/
if (probe_pass && i == pos) {
i += delta + 1;
insn++;
i = end_new;
insn = prog->insnsi + end_old;
}
code = insn->code;
if (BPF_CLASS(code) != BPF_JMP ||
@@ -367,11 +369,11 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
if (BPF_OP(code) == BPF_CALL) {
if (insn->src_reg != BPF_PSEUDO_CALL)
continue;
ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
probe_pass);
ret = bpf_adj_delta_to_imm(insn, pos, end_old,
end_new, i, probe_pass);
} else {
ret = bpf_adj_delta_to_off(insn, pos, delta, i,
probe_pass);
ret = bpf_adj_delta_to_off(insn, pos, end_old,
end_new, i, probe_pass);
}
if (ret)
break;
@@ -421,7 +423,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* we afterwards may not fail anymore.
*/
if (insn_adj_cnt > cnt_max &&
bpf_adj_branches(prog, off, insn_delta, true))
bpf_adj_branches(prog, off, off + 1, off + len, true))
return NULL;
/* Several new instructions need to be inserted. Make room
@@ -453,13 +455,25 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
* the ship has sailed to reverse to the original state. An
* overflow cannot happen at this point.
*/
BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
bpf_adj_linfo(prog_adj, off, insn_delta);
return prog_adj;
}
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
/* Branch offsets can't overflow when program is shrinking, no need
* to call bpf_adj_branches(..., true) here
*/
memmove(prog->insnsi + off, prog->insnsi + off + cnt,
sizeof(struct bpf_insn) * (prog->len - off - cnt));
prog->len -= cnt;
return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
}
void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
int i;

View file

@@ -173,6 +173,41 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
return ret;
}
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
struct bpf_insn *insn)
{
const struct bpf_prog_offload_ops *ops;
struct bpf_prog_offload *offload;
int ret = -EOPNOTSUPP;
down_read(&bpf_devs_lock);
offload = env->prog->aux->offload;
if (offload) {
ops = offload->offdev->ops;
if (!offload->opt_failed && ops->replace_insn)
ret = ops->replace_insn(env, off, insn);
offload->opt_failed |= ret;
}
up_read(&bpf_devs_lock);
}
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
struct bpf_prog_offload *offload;
int ret = -EOPNOTSUPP;
down_read(&bpf_devs_lock);
offload = env->prog->aux->offload;
if (offload) {
if (!offload->opt_failed && offload->offdev->ops->remove_insns)
ret = offload->offdev->ops->remove_insns(env, off, cnt);
offload->opt_failed |= ret;
}
up_read(&bpf_devs_lock);
}
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload = prog->aux->offload;

View file

@@ -6432,6 +6432,153 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
return new_prog;
}
static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
u32 off, u32 cnt)
{
int i, j;
/* find first prog starting at or after off (first to remove) */
for (i = 0; i < env->subprog_cnt; i++)
if (env->subprog_info[i].start >= off)
break;
/* find first prog starting at or after off + cnt (first to stay) */
for (j = i; j < env->subprog_cnt; j++)
if (env->subprog_info[j].start >= off + cnt)
break;
/* if j doesn't start exactly at off + cnt, we are just removing
* the front of previous prog
*/
if (env->subprog_info[j].start != off + cnt)
j--;
if (j > i) {
struct bpf_prog_aux *aux = env->prog->aux;
int move;
/* move fake 'exit' subprog as well */
move = env->subprog_cnt + 1 - j;
memmove(env->subprog_info + i,
env->subprog_info + j,
sizeof(*env->subprog_info) * move);
env->subprog_cnt -= j - i;
/* remove func_info */
if (aux->func_info) {
move = aux->func_info_cnt - j;
memmove(aux->func_info + i,
aux->func_info + j,
sizeof(*aux->func_info) * move);
aux->func_info_cnt -= j - i;
/* func_info->insn_off is set after all code rewrites,
* in adjust_btf_func() - no need to adjust
*/
}
} else {
/* convert i from "first prog to remove" to "first to adjust" */
if (env->subprog_info[i].start == off)
i++;
}
/* update fake 'exit' subprog as well */
for (; i <= env->subprog_cnt; i++)
env->subprog_info[i].start -= cnt;
return 0;
}
static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
u32 cnt)
{
struct bpf_prog *prog = env->prog;
u32 i, l_off, l_cnt, nr_linfo;
struct bpf_line_info *linfo;
nr_linfo = prog->aux->nr_linfo;
if (!nr_linfo)
return 0;
linfo = prog->aux->linfo;
/* find first line info to remove, count lines to be removed */
for (i = 0; i < nr_linfo; i++)
if (linfo[i].insn_off >= off)
break;
l_off = i;
l_cnt = 0;
for (; i < nr_linfo; i++)
if (linfo[i].insn_off < off + cnt)
l_cnt++;
else
break;
/* First live insn doesn't match first live linfo, it needs to "inherit"
* last removed linfo. prog is already modified, so prog->len == off
* means no live instructions after (tail of the program was removed).
*/
if (prog->len != off && l_cnt &&
(i == nr_linfo || linfo[i].insn_off != off + cnt)) {
l_cnt--;
linfo[--i].insn_off = off + cnt;
}
/* remove the line info which refer to the removed instructions */
if (l_cnt) {
memmove(linfo + l_off, linfo + i,
sizeof(*linfo) * (nr_linfo - i));
prog->aux->nr_linfo -= l_cnt;
nr_linfo = prog->aux->nr_linfo;
}
/* pull all linfo[i].insn_off >= off + cnt in by cnt */
for (i = l_off; i < nr_linfo; i++)
linfo[i].insn_off -= cnt;
/* fix up all subprogs (incl. 'exit') which start >= off */
for (i = 0; i <= env->subprog_cnt; i++)
if (env->subprog_info[i].linfo_idx > l_off) {
/* program may have started in the removed region but
* may not be fully removed
*/
if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
env->subprog_info[i].linfo_idx -= l_cnt;
else
env->subprog_info[i].linfo_idx = l_off;
}
return 0;
}
static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
unsigned int orig_prog_len = env->prog->len;
int err;
if (bpf_prog_is_dev_bound(env->prog->aux))
bpf_prog_offload_remove_insns(env, off, cnt);
err = bpf_remove_insns(env->prog, off, cnt);
if (err)
return err;
err = adjust_subprog_starts_after_remove(env, off, cnt);
if (err)
return err;
err = bpf_adj_linfo_after_remove(env, off, cnt);
if (err)
return err;
memmove(aux_data + off, aux_data + off + cnt,
sizeof(*aux_data) * (orig_prog_len - off - cnt));
return 0;
}
/* The verifier does more data flow analysis than llvm and will not
* explore branches that are dead at run time. Malicious programs can
* have dead code too. Therefore replace all dead at-run-time code
@@ -6458,6 +6605,88 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
}
}
static bool insn_is_cond_jump(u8 code)
{
u8 op;
if (BPF_CLASS(code) != BPF_JMP)
return false;
op = BPF_OP(code);
return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}
static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
struct bpf_insn *insn = env->prog->insnsi;
const int insn_cnt = env->prog->len;
int i;
for (i = 0; i < insn_cnt; i++, insn++) {
if (!insn_is_cond_jump(insn->code))
continue;
if (!aux_data[i + 1].seen)
ja.off = insn->off;
else if (!aux_data[i + 1 + insn->off].seen)
ja.off = 0;
else
continue;
if (bpf_prog_is_dev_bound(env->prog->aux))
bpf_prog_offload_replace_insn(env, i, &ja);
memcpy(insn, &ja, sizeof(ja));
}
}
static int opt_remove_dead_code(struct bpf_verifier_env *env)
{
struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
int insn_cnt = env->prog->len;
int i, err;
for (i = 0; i < insn_cnt; i++) {
int j;
j = 0;
while (i + j < insn_cnt && !aux_data[i + j].seen)
j++;
if (!j)
continue;
err = verifier_remove_insns(env, i, j);
if (err)
return err;
insn_cnt = env->prog->len;
}
return 0;
}
static int opt_remove_nops(struct bpf_verifier_env *env)
{
const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
int i, err;
for (i = 0; i < insn_cnt; i++) {
if (memcmp(&insn[i], &ja, sizeof(ja)))
continue;
err = verifier_remove_insns(env, i, 1);
if (err)
return err;
insn_cnt--;
i--;
}
return 0;
}
/* convert load instructions that access fields of a context type into a
* sequence of instructions that access fields of the underlying structure:
* struct __sk_buff -> struct sk_buff
@@ -7148,7 +7377,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
int ret = -EINVAL;
int i, len, ret = -EINVAL;
bool is_priv;
/* no program is valid */
if (ARRAY_SIZE(bpf_verifier_ops) == 0)
@@ -7162,12 +7392,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
return -ENOMEM;
log = &env->log;
len = (*prog)->len;
env->insn_aux_data =
vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
(*prog)->len));
vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
ret = -ENOMEM;
if (!env->insn_aux_data)
goto err_free_env;
for (i = 0; i < len; i++)
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
@@ -7195,6 +7427,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
env->strict_alignment = false;
is_priv = capable(CAP_SYS_ADMIN);
env->allow_ptr_leaks = is_priv;
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
@@ -7212,8 +7447,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (!env->explored_states)
goto skip_full_check;
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
ret = check_subprogs(env);
if (ret < 0)
goto skip_full_check;
@@ -7243,8 +7476,17 @@ skip_full_check:
ret = check_max_stack_depth(env);
/* instruction rewrites happen after this point */
if (ret == 0)
sanitize_dead_code(env);
if (is_priv) {
if (ret == 0)
opt_hard_wire_dead_code_branches(env);
if (ret == 0)
ret = opt_remove_dead_code(env);
if (ret == 0)
ret = opt_remove_nops(env);
} else {
if (ret == 0)
sanitize_dead_code(env);
}
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
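The subtlest piece of the verifier.c changes above is the v4 linfo-keeping rule from the cover letter, implemented in bpf_adj_linfo_after_remove(): if the first live instruction after a removed region has no line info record of its own, the last dead record is kept and retargeted to it. A standalone toy program tracing just that rule (not kernel code; the 6-instruction layout and offsets are made up for illustration):

#include <stdio.h>

struct linfo { unsigned int insn_off; };

int main(void)
{
	/* 6-insn program, line info at insns 0, 2 and 5;
	 * insns [2, 4) have been removed, leaving 4 insns
	 */
	struct linfo li[] = { {0}, {2}, {5} };
	unsigned int nr = 3, off = 2, cnt = 2, len = 6 - cnt;
	unsigned int i, l_off, l_cnt;

	/* find first record at/after the hole, count records inside it */
	for (i = 0; i < nr && li[i].insn_off < off; i++)
		;
	l_off = i;
	for (l_cnt = 0; i < nr && li[i].insn_off < off + cnt; i++)
		l_cnt++;

	/* first live insn (old insn 4) has no record of its own,
	 * so it inherits the last dead record (insn_off 2)
	 */
	if (len != off && l_cnt && (i == nr || li[i].insn_off != off + cnt)) {
		l_cnt--;
		li[--i].insn_off = off + cnt;
	}
	/* memmove() dropping the remaining l_cnt dead records elided;
	 * l_cnt is 0 in this example
	 */
	for (i = l_off; i < nr; i++)
		li[i].insn_off -= cnt;	/* pull offsets in by cnt */

	for (i = 0; i < nr; i++)	/* prints 0, 2, 3 */
		printf("linfo[%u].insn_off = %u\n", i, li[i].insn_off);
	return 0;
}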

View file

@@ -4293,6 +4293,10 @@ static struct prog_info_raw_test {
__u32 line_info_rec_size;
__u32 nr_jited_ksyms;
bool expected_prog_load_failure;
__u32 dead_code_cnt;
__u32 dead_code_mask;
__u32 dead_func_cnt;
__u32 dead_func_mask;
} info_raw_tests[] = {
{
.descr = "func_type (main func + one sub)",
@@ -4719,6 +4723,369 @@ static struct prog_info_raw_test {
.expected_prog_load_failure = true,
},
{
.descr = "line_info (dead start)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 1,
.dead_code_mask = 0x01,
},
{
.descr = "line_info (dead end)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 0,
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12),
BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11),
BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 2,
.dead_code_mask = 0x28,
},
{
.descr = "line_info (dead code + subprog + func_info)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {14, 3} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8),
BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 9,
.dead_code_mask = 0x3fe,
},
{
.descr = "line_info (dead subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
"\0return 0;\0return 0;\0/* dead */\0/* dead */"
"\0/* dead */\0return bla + 1;\0return bla + 1;"
"\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {6, 3}, {9, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 3,
.dead_code_mask = 0x70,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead last subprog)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */"
"\0return 0;\0/* dead */\0/* dead */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {5, 3} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 1,
.dead_code_cnt = 2,
.dead_code_mask = 0x18,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead subprog + dead start)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */"
"\0return 0;\0return 0;\0return 0;"
"\0/* dead */\0/* dead */\0/* dead */\0/* dead */"
"\0return b + 1;\0return b + 1;\0return b + 1;"),
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {7, 3}, {10, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 5,
.dead_code_mask = 0x1e2,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead subprog + dead start w/ move)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */"
"\0return 0;\0return 0;\0/* dead */\0/* dead */"
"\0/* dead */\0return bla + 1;\0return bla + 1;"
"\0return bla + 1;\0return func(a);\0b+=1;\0return b;"),
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
BPF_CALL_REL(3),
BPF_CALL_REL(5),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_CALL_REL(1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 3,
.func_info_rec_size = 8,
.func_info = { {0, 4}, {6, 3}, {9, 5} },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
.dead_code_cnt = 3,
.dead_code_mask = 0x70,
.dead_func_cnt = 1,
.dead_func_mask = 0x2,
},
{
.descr = "line_info (dead end + subprog start w/ no linfo)",
.raw_types = {
BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
BTF_END_RAW,
},
BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"),
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3),
BPF_CALL_REL(3),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.func_info_cnt = 2,
.func_info_rec_size = 8,
.func_info = { {0, 3}, {6, 4}, },
.line_info = {
BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10),
BTF_END_RAW,
},
.line_info_rec_size = sizeof(struct bpf_line_info),
.nr_jited_ksyms = 2,
},
};
static size_t probe_prog_length(const struct bpf_insn *fp)
@@ -4778,6 +5145,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
struct bpf_func_info *finfo;
__u32 info_len, rec_size, i;
void *func_info = NULL;
__u32 nr_func_info;
int err;
/* get necessary lens */
@@ -4787,7 +5155,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
fprintf(stderr, "%s\n", btf_log_buf);
return -1;
}
if (CHECK(info.nr_func_info != test->func_info_cnt,
nr_func_info = test->func_info_cnt - test->dead_func_cnt;
if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (1st) %d",
info.nr_func_info)) {
return -1;
@@ -4808,7 +5177,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
/* reset info to only retrieve func_info related data */
memset(&info, 0, sizeof(info));
info.nr_func_info = test->func_info_cnt;
info.nr_func_info = nr_func_info;
info.func_info_rec_size = rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
@@ -4817,7 +5186,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
err = -1;
goto done;
}
if (CHECK(info.nr_func_info != test->func_info_cnt,
if (CHECK(info.nr_func_info != nr_func_info,
"incorrect info.nr_func_info (2nd) %d",
info.nr_func_info)) {
err = -1;
@@ -4831,7 +5200,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test,
}
finfo = func_info;
for (i = 0; i < test->func_info_cnt; i++) {
for (i = 0; i < nr_func_info; i++) {
if (test->dead_func_mask & (1 << i))
continue;
if (CHECK(finfo->type_id != test->func_info[i][1],
"incorrect func_type %u expected %u",
finfo->type_id, test->func_info[i][1])) {
@@ -4860,6 +5231,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
struct bpf_prog_info info = {};
__u32 *jited_func_lens = NULL;
__u64 cur_func_ksyms;
__u32 dead_insns;
int err;
jited_cnt = cnt;
@@ -4868,7 +5240,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
if (test->nr_jited_ksyms)
nr_jited_ksyms = test->nr_jited_ksyms;
else
nr_jited_ksyms = test->func_info_cnt;
nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt;
nr_jited_func_lens = nr_jited_ksyms;
info_len = sizeof(struct bpf_prog_info);
@@ -4970,12 +5342,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test,
goto done;
}
dead_insns = 0;
while (test->dead_code_mask & (1 << dead_insns))
dead_insns++;
CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
linfo[0].insn_off);
for (i = 1; i < cnt; i++) {
const struct bpf_line_info *expected_linfo;
expected_linfo = patched_linfo + (i * test->line_info_rec_size);
while (test->dead_code_mask & (1 << (i + dead_insns)))
dead_insns++;
expected_linfo = patched_linfo +
((i + dead_insns) * test->line_info_rec_size);
if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
"linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
i, linfo[i].insn_off,
@@ -5133,7 +5513,9 @@ static int do_test_info_raw(unsigned int test_num)
if (err)
goto done;
err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
err = test_get_linfo(test, patched_linfo,
attr.line_info_cnt - test->dead_code_cnt,
prog_fd);
if (err)
goto done;

View file

@@ -15599,6 +15599,166 @@ static struct bpf_test tests[] = {
.result_unpriv = ACCEPT,
.result = ACCEPT,
},
{
"dead code: start",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"dead code: end 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + func",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + two functions",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: function in the middle and mid of another func",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: middle of main before call",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
{
"dead code: start of a function",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
};
static int probe_filter_length(const struct bpf_insn *fp)