Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2020-04-24

The following pull-request contains BPF updates for your *net* tree.

We've added 17 non-merge commits during the last 5 day(s) which contain
a total of 19 files changed, 203 insertions(+), 85 deletions(-).

The main changes are:

1) link_update fix, from Andrii.

2) libbpf get_xdp_id fix, from David.

3) xadd verifier fix, from Jann.

4) x86-32 JIT fixes, from Luke and Wang.

5) test_btf fix, from Stanislav.

6) freplace verifier fix, from Toke.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: 167ff131cb
@@ -158,6 +158,19 @@ static bool is_ereg(u32 reg)
 			     BIT(BPF_REG_AX));
 }
 
+/*
+ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
+ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
+ * of encoding. al,cl,dl,bl have simpler encoding.
+ */
+static bool is_ereg_8l(u32 reg)
+{
+	return is_ereg(reg) ||
+	    (1 << reg) & (BIT(BPF_REG_1) |
+			  BIT(BPF_REG_2) |
+			  BIT(BPF_REG_FP));
+}
+
 static bool is_axreg(u32 reg)
 {
 	return reg == BPF_REG_0;

@@ -598,9 +611,8 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
 	switch (size) {
 	case BPF_B:
 		/* Emit 'mov byte ptr [rax + off], al' */
-		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
-		    /* We have to add extra byte for x86 SIL, DIL regs */
-		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+		if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
+			/* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
 			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
 		else
 			EMIT1(0x88);
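A note on the two x86-64 JIT hunks above: opcode 0x88 ("mov r/m8, r8") can only name sil, dil, bpl, spl (and r8b-r15b) when a REX prefix is present; without one, the same ModRM field selects ah/ch/dh/bh. The JIT maps BPF R1, R2 and the frame pointer to rdi, rsi and rbp, so a BPF_STX BPF_B whose source is one of those registers needs the extra 0x40 prefix byte, which is exactly what is_ereg_8l() now reports. Below is a hedged, hand-assembled user-space illustration, not kernel code; the register mapping is assumed from the JIT rather than defined here.

/* Illustration of the encoding issue fixed above (hand-assembled x86-64). */
#include <stdio.h>

int main(void)
{
	/* mov byte ptr [rax + 0], dh  -- what the bytes mean without a REX prefix */
	const unsigned char without_rex[] = { 0x88, 0x70, 0x00 };
	/* mov byte ptr [rax + 0], sil -- what the JIT actually intends for BPF R2 */
	const unsigned char with_rex[]    = { 0x40, 0x88, 0x70, 0x00 };

	printf("without REX: %zu bytes, with REX: %zu bytes\n",
	       sizeof(without_rex), sizeof(with_rex));
	return 0;
}

The same three-byte sequence therefore decodes to a completely different source register unless the prefix is emitted, which is why the store helper now asks is_ereg_8l() about the source.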
@@ -1847,14 +1847,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_B:
 		case BPF_H:
 		case BPF_W:
-			if (!bpf_prog->aux->verifier_zext)
+			if (bpf_prog->aux->verifier_zext)
 				break;
 			if (dstk) {
 				EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
 				      STACK_VAR(dst_hi));
 				EMIT(0x0, 4);
 			} else {
-				EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
+				/* xor dst_hi,dst_hi */
+				EMIT2(0x33,
+				      add_2reg(0xC0, dst_hi, dst_hi));
 			}
 			break;
 		case BPF_DW:

@@ -2013,8 +2015,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_JMP | BPF_JSET | BPF_X:
 		case BPF_JMP32 | BPF_JSET | BPF_X: {
 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
-			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
-			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+			u8 dreg_lo = IA32_EAX;
+			u8 dreg_hi = IA32_EDX;
 			u8 sreg_lo = sstk ? IA32_ECX : src_lo;
 			u8 sreg_hi = sstk ? IA32_EBX : src_hi;
 

@@ -2026,6 +2028,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 				      add_2reg(0x40, IA32_EBP,
 					       IA32_EDX),
 				      STACK_VAR(dst_hi));
+			} else {
+				/* mov dreg_lo,dst_lo */
+				EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+				if (is_jmp64)
+					/* mov dreg_hi,dst_hi */
+					EMIT2(0x89,
+					      add_2reg(0xC0, dreg_hi, dst_hi));
 			}
 
 			if (sstk) {

@@ -2050,8 +2059,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_JMP | BPF_JSET | BPF_K:
 		case BPF_JMP32 | BPF_JSET | BPF_K: {
 			bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
-			u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
-			u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+			u8 dreg_lo = IA32_EAX;
+			u8 dreg_hi = IA32_EDX;
 			u8 sreg_lo = IA32_ECX;
 			u8 sreg_hi = IA32_EBX;
 			u32 hi;

@@ -2064,6 +2073,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 				      add_2reg(0x40, IA32_EBP,
 					       IA32_EDX),
 				      STACK_VAR(dst_hi));
+			} else {
+				/* mov dreg_lo,dst_lo */
+				EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+				if (is_jmp64)
+					/* mov dreg_hi,dst_hi */
+					EMIT2(0x89,
+					      add_2reg(0xC0, dreg_hi, dst_hi));
 			}
 
 			/* mov ecx,imm32 */
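The x86-32 hunks above treat each 64-bit BPF register as a lo/hi pair of 32-bit registers. The first change fixes an inverted test: when the verifier already guarantees zero-extension (verifier_zext), the JIT may skip clearing the high half, otherwise it must clear it itself (now with a shorter xor). The JSET changes always stage the destination in scratch registers so the test does not clobber the real dst pair. A minimal user-space model of the zero-extension rule follows, using hypothetical names rather than the JIT's actual data structures.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of one 64-bit BPF register on a 32-bit JIT:
 * it lives in two 32-bit halves. */
struct bpf_reg_pair {
	uint32_t lo;
	uint32_t hi;
};

/* A sub-64-bit load (BPF_B/BPF_H/BPF_W) must leave the register
 * zero-extended. If the verifier inserted an explicit zero-extension
 * (verifier_zext), the JIT can skip clearing 'hi'; otherwise it must
 * clear it itself, which is the "xor dst_hi,dst_hi" in the hunk above. */
static void emulate_ldx_w(struct bpf_reg_pair *dst, uint32_t val,
			  int verifier_zext)
{
	dst->lo = val;
	if (!verifier_zext)
		dst->hi = 0;
}

int main(void)
{
	struct bpf_reg_pair r = { .lo = 0, .hi = 0xdeadbeef };

	emulate_ldx_w(&r, 0x1234, /*verifier_zext=*/0);
	printf("lo=%#x hi=%#x\n", r.lo, r.hi); /* hi must print as 0 */
	return 0;
}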
@@ -1642,7 +1642,7 @@ union bpf_attr {
  * 		ifindex, but doesn't require a map to do so.
  * 	Return
  * 		**XDP_REDIRECT** on success, or the value of the two lower bits
- * 		of the **flags* argument on error.
+ * 		of the *flags* argument on error.
  *
  * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
  * 	Description

@@ -469,7 +469,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -EOVERFLOW;
 
 	/* Make sure CPU is a valid possible cpu */
-	if (!cpu_possible(key_cpu))
+	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
 		return -ENODEV;
 
 	if (qsize == 0) {
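For the cpumap hunk just above: the key of a BPF_MAP_TYPE_CPUMAP entry is a CPU index, and the fix rejects a key beyond the possible-CPU bitmap with -ENODEV instead of handing an out-of-range bit to cpu_possible(). A hedged user-space sketch of the update path this validates; map_fd is assumed to be a cpumap created elsewhere, and the queue-size value matches the cpumap API of this kernel era.

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Add one CPU to an existing BPF_MAP_TYPE_CPUMAP (illustrative sketch). */
int cpumap_add_cpu(int map_fd, unsigned int cpu)
{
	__u32 key = cpu;	/* must be a valid possible CPU index */
	__u32 qsize = 192;	/* per-CPU queue size, illustrative value */

	/* With the fix above, a key past the possible-CPU bitmap fails
	 * cleanly instead of indexing outside nr_cpumask_bits. */
	return bpf_map_update_elem(map_fd, &key, &qsize, BPF_ANY);
}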
@@ -2283,7 +2283,7 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
 }
 #endif
 
-const struct file_operations bpf_link_fops = {
+static const struct file_operations bpf_link_fops = {
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo	= bpf_link_show_fdinfo,
 #endif

@@ -3628,8 +3628,10 @@ static int link_update(union bpf_attr *attr)
 		return PTR_ERR(link);
 
 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
-	if (IS_ERR(new_prog))
-		return PTR_ERR(new_prog);
+	if (IS_ERR(new_prog)) {
+		ret = PTR_ERR(new_prog);
+		goto out_put_link;
+	}
 
 	if (flags & BPF_F_REPLACE) {
 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);

@@ -3638,6 +3640,9 @@ static int link_update(union bpf_attr *attr)
 			old_prog = NULL;
 			goto out_put_progs;
 		}
+	} else if (attr->link_update.old_prog_fd) {
+		ret = -EINVAL;
+		goto out_put_progs;
 	}
 
 #ifdef CONFIG_CGROUP_BPF

@@ -3653,6 +3658,8 @@ out_put_progs:
 		bpf_prog_put(old_prog);
 	if (ret)
 		bpf_prog_put(new_prog);
+out_put_link:
+	bpf_link_put(link);
 	return ret;
 }
 
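The link_update hunks above make the syscall release its link reference on every error path and reject old_prog_fd when BPF_F_REPLACE is not set. From user space the corresponding libbpf call is bpf_link_update(); the following is a hedged sketch, assuming link_fd, new_fd and old_fd are valid file descriptors obtained elsewhere.

#include <linux/bpf.h>
#include <bpf/bpf.h>

int replace_link_prog(int link_fd, int new_fd, int old_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts,
			    .flags = BPF_F_REPLACE,
			    /* old_prog_fd is only legal together with
			     * BPF_F_REPLACE; passing it without the flag
			     * now fails with -EINVAL per the fix above. */
			    .old_prog_fd = old_fd);

	return bpf_link_update(link_fd, new_fd, &opts);
}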
@@ -2118,6 +2118,15 @@ static bool register_is_const(struct bpf_reg_state *reg)
 	return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
 }
 
+static bool __is_pointer_value(bool allow_ptr_leaks,
+			       const struct bpf_reg_state *reg)
+{
+	if (allow_ptr_leaks)
+		return false;
+
+	return reg->type != SCALAR_VALUE;
+}
+
 static void save_register_state(struct bpf_func_state *state,
 				int spi, struct bpf_reg_state *reg)
 {

@@ -2308,6 +2317,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
 			 * which resets stack/reg liveness for state transitions
 			 */
 			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+		} else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
+			/* If value_regno==-1, the caller is asking us whether
+			 * it is acceptable to use this value as a SCALAR_VALUE
+			 * (e.g. for XADD).
+			 * We must not allow unprivileged callers to do that
+			 * with spilled pointers.
+			 */
+			verbose(env, "leaking pointer from stack off %d\n",
+				off);
+			return -EACCES;
 		}
 		mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
 	} else {

@@ -2673,15 +2692,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
 	return -EACCES;
 }
 
-static bool __is_pointer_value(bool allow_ptr_leaks,
-			       const struct bpf_reg_state *reg)
-{
-	if (allow_ptr_leaks)
-		return false;
-
-	return reg->type != SCALAR_VALUE;
-}
-
 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
 {
 	return cur_regs(env) + regno;
@@ -3089,7 +3099,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	if (ret < 0)
 		return ret;
 
-	if (atype == BPF_READ) {
+	if (atype == BPF_READ && value_regno >= 0) {
 		if (ret == SCALAR_VALUE) {
 			mark_reg_unknown(env, regs, value_regno);
 			return 0;

@@ -10487,6 +10497,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 				return -EINVAL;
 			}
 			env->ops = bpf_verifier_ops[tgt_prog->type];
+			prog->expected_attach_type = tgt_prog->expected_attach_type;
 		}
 		if (!tgt_prog->jited) {
 			verbose(env, "Can attach to only JITed progs\n");

@@ -10831,6 +10842,13 @@ err_release_maps:
 		 * them now. Otherwise free_used_maps() will release them.
 		 */
 		release_maps(env);
+
+	/* extension progs temporarily inherit the attach_type of their targets
+	   for verification purposes, so set it back to zero before returning
+	 */
+	if (env->prog->type == BPF_PROG_TYPE_EXT)
+		env->prog->expected_attach_type = 0;
+
 	*prog = env->prog;
 err_unlock:
 	if (!is_priv)
@@ -479,6 +479,7 @@ static int do_unregister(int argc, char **argv)
 
 static int do_register(int argc, char **argv)
 {
+	struct bpf_object_load_attr load_attr = {};
 	const struct bpf_map_def *def;
 	struct bpf_map_info info = {};
 	__u32 info_len = sizeof(info);

@@ -499,7 +500,12 @@ static int do_register(int argc, char **argv)
 
 	set_max_rlimit();
 
-	if (bpf_object__load(obj)) {
+	load_attr.obj = obj;
+	if (verifier_logs)
+		/* log_level1 + log_level2 + stats, but not stable UAPI */
+		load_attr.log_level = 1 + 2 + 4;
+
+	if (bpf_object__load_xattr(&load_attr)) {
 		bpf_object__close(obj);
 		return -1;
 	}

@@ -8,7 +8,7 @@ BPFTOOL ?= $(DEFAULT_BPFTOOL)
 LIBBPF_SRC := $(abspath ../../lib/bpf)
 BPFOBJ := $(OUTPUT)/libbpf.a
 BPF_INCLUDE := $(OUTPUT)
-INCLUDES := -I$(BPF_INCLUDE) -I$(OUTPUT) -I$(abspath ../../lib)
+INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib)
 CFLAGS := -g -Wall
 
 # Try to detect best kernel BTF source
@@ -1642,7 +1642,7 @@ union bpf_attr {
  * 		ifindex, but doesn't require a map to do so.
  * 	Return
  * 		**XDP_REDIRECT** on success, or the value of the two lower bits
- * 		of the **flags* argument on error.
+ * 		of the *flags* argument on error.
  *
  * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
  * 	Description

@@ -321,6 +321,8 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
 
 static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
 {
+	flags &= XDP_FLAGS_MODES;
+
 	if (info->attach_mode != XDP_ATTACHED_MULTI && !flags)
 		return info->prog_id;
 	if (flags & XDP_FLAGS_DRV_MODE)
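The netlink hunk above masks the caller's flags down to XDP_FLAGS_MODES, so passing an unrelated flag no longer makes get_xdp_id() report a program id of 0. A hedged user-space sketch of the query this affects; the interface-name handling is illustrative only.

#include <stdio.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(int argc, char **argv)
{
	__u32 prog_id = 0;
	int ifindex, err;

	if (argc < 2)
		return 1;
	ifindex = if_nametoindex(argv[1]);
	if (!ifindex)
		return 1;

	/* Ask only for a generic-mode (skb) XDP program on this device. */
	err = bpf_get_link_xdp_id(ifindex, &prog_id, XDP_FLAGS_SKB_MODE);
	if (err)
		return 1;
	printf("generic-mode XDP prog id: %u\n", prog_id);
	return 0;
}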
@@ -5,7 +5,8 @@
 static void test_fexit_bpf2bpf_common(const char *obj_file,
 				      const char *target_obj_file,
 				      int prog_cnt,
-				      const char **prog_name)
+				      const char **prog_name,
+				      bool run_prog)
 {
 	struct bpf_object *obj = NULL, *pkt_obj;
 	int err, pkt_fd, i;

@@ -18,7 +19,8 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 
 	err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
 			    &pkt_obj, &pkt_fd);
-	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
+	if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
+		  target_obj_file, err, errno))
 		return;
 	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 			    .attach_prog_fd = pkt_fd,

@@ -33,7 +35,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 
 	obj = bpf_object__open_file(obj_file, &opts);
 	if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
-		  "failed to open fexit_bpf2bpf: %ld\n",
+		  "failed to open %s: %ld\n", obj_file,
 		  PTR_ERR(obj)))
 		goto close_prog;
 

@@ -49,6 +51,10 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 		if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n"))
 			goto close_prog;
 	}
+
+	if (!run_prog)
+		goto close_prog;
+
 	data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
 	if (CHECK(!data_map, "find_data_map", "data map not found\n"))
 		goto close_prog;

@@ -89,7 +95,7 @@ static void test_target_no_callees(void)
 	test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.o",
 				  "./test_pkt_md_access.o",
 				  ARRAY_SIZE(prog_name),
-				  prog_name);
+				  prog_name, true);
 }
 
 static void test_target_yes_callees(void)

@@ -103,7 +109,7 @@ static void test_target_yes_callees(void)
 	test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
 				  "./test_pkt_access.o",
 				  ARRAY_SIZE(prog_name),
-				  prog_name);
+				  prog_name, true);
 }
 
 static void test_func_replace(void)

@@ -120,7 +126,18 @@ static void test_func_replace(void)
 	test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
 				  "./test_pkt_access.o",
 				  ARRAY_SIZE(prog_name),
-				  prog_name);
+				  prog_name, true);
+}
+
+static void test_func_replace_verify(void)
+{
+	const char *prog_name[] = {
+		"freplace/do_bind",
+	};
+	test_fexit_bpf2bpf_common("./freplace_connect4.o",
+				  "./connect4_prog.o",
+				  ARRAY_SIZE(prog_name),
+				  prog_name, false);
 }
 
 void test_fexit_bpf2bpf(void)

@@ -128,4 +145,5 @@ void test_fexit_bpf2bpf(void)
 	test_target_no_callees();
 	test_target_yes_callees();
 	test_func_replace();
+	test_func_replace_verify();
 }

@@ -18,11 +18,25 @@
 
 int _version SEC("version") = 1;
 
+__attribute__ ((noinline))
+int do_bind(struct bpf_sock_addr *ctx)
+{
+	struct sockaddr_in sa = {};
+
+	sa.sin_family = AF_INET;
+	sa.sin_port = bpf_htons(0);
+	sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4);
+
+	if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
+		return 0;
+
+	return 1;
+}
+
 SEC("cgroup/connect4")
 int connect_v4_prog(struct bpf_sock_addr *ctx)
 {
 	struct bpf_sock_tuple tuple = {};
-	struct sockaddr_in sa;
 	struct bpf_sock *sk;
 
 	/* Verify that new destination is available. */

@@ -56,17 +70,7 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
 	ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4);
 	ctx->user_port = bpf_htons(DST_REWRITE_PORT4);
 
-	/* Rewrite source. */
-	memset(&sa, 0, sizeof(sa));
-
-	sa.sin_family = AF_INET;
-	sa.sin_port = bpf_htons(0);
-	sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4);
-
-	if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
-		return 0;
-
-	return 1;
+	return do_bind(ctx) ? 1 : 0;
 }
 
 char _license[] SEC("license") = "GPL";

@@ -0,0 +1,18 @@
+#include <linux/stddef.h>
+#include <linux/ipv6.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+SEC("freplace/do_bind")
+int new_do_bind(struct bpf_sock_addr *ctx)
+{
+	struct sockaddr_in sa = {};
+
+	bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
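The new freplace program above replaces do_bind() in connect4_prog.o at verification time. For reference, the selftest helper earlier in this diff attaches it the usual libbpf way: open the object with attach_prog_fd pointing at the target program, load it, then attach each program as a trace link. A hedged sketch of that flow follows; the file and section names are taken from the test, and error handling is abbreviated.

#include <bpf/libbpf.h>

int attach_freplace_example(int target_prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .attach_prog_fd = target_prog_fd);
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("freplace_connect4.o", &opts);
	if (libbpf_get_error(obj))
		return -1;
	if (bpf_object__load(obj))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "freplace/do_bind");
	if (!prog)
		goto out;

	link = bpf_program__attach_trace(prog);
	if (libbpf_get_error(link))
		goto out;

	/* Caller keeps obj and link alive for as long as the replacement
	 * should stay attached. */
	return 0;
out:
	bpf_object__close(obj);
	return -1;
}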
@ -20,20 +20,12 @@ struct bpf_map_def SEC("maps") btf_map = {
|
||||||
|
|
||||||
BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
|
BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
|
||||||
|
|
||||||
struct dummy_tracepoint_args {
|
|
||||||
unsigned long long pad;
|
|
||||||
struct sock *sock;
|
|
||||||
};
|
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
int test_long_fname_2(void)
|
||||||
{
|
{
|
||||||
struct ipv_counts *counts;
|
struct ipv_counts *counts;
|
||||||
int key = 0;
|
int key = 0;
|
||||||
|
|
||||||
if (!arg->sock)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
counts = bpf_map_lookup_elem(&btf_map, &key);
|
counts = bpf_map_lookup_elem(&btf_map, &key);
|
||||||
if (!counts)
|
if (!counts)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -44,15 +36,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
||||||
}
|
}
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_1(struct dummy_tracepoint_args *arg)
|
int test_long_fname_1(void)
|
||||||
{
|
{
|
||||||
return test_long_fname_2(arg);
|
return test_long_fname_2();
|
||||||
}
|
}
|
||||||
|
|
||||||
SEC("dummy_tracepoint")
|
SEC("dummy_tracepoint")
|
||||||
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
|
int _dummy_tracepoint(void *arg)
|
||||||
{
|
{
|
||||||
return test_long_fname_1(arg);
|
return test_long_fname_1();
|
||||||
}
|
}
|
||||||
|
|
||||||
char _license[] SEC("license") = "GPL";
|
char _license[] SEC("license") = "GPL";
|
||||||
|
|
|
@ -28,20 +28,12 @@ struct {
|
||||||
__type(value, struct ipv_counts);
|
__type(value, struct ipv_counts);
|
||||||
} btf_map SEC(".maps");
|
} btf_map SEC(".maps");
|
||||||
|
|
||||||
struct dummy_tracepoint_args {
|
|
||||||
unsigned long long pad;
|
|
||||||
struct sock *sock;
|
|
||||||
};
|
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
int test_long_fname_2(void)
|
||||||
{
|
{
|
||||||
struct ipv_counts *counts;
|
struct ipv_counts *counts;
|
||||||
int key = 0;
|
int key = 0;
|
||||||
|
|
||||||
if (!arg->sock)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
counts = bpf_map_lookup_elem(&btf_map, &key);
|
counts = bpf_map_lookup_elem(&btf_map, &key);
|
||||||
if (!counts)
|
if (!counts)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -57,15 +49,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
||||||
}
|
}
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_1(struct dummy_tracepoint_args *arg)
|
int test_long_fname_1(void)
|
||||||
{
|
{
|
||||||
return test_long_fname_2(arg);
|
return test_long_fname_2();
|
||||||
}
|
}
|
||||||
|
|
||||||
SEC("dummy_tracepoint")
|
SEC("dummy_tracepoint")
|
||||||
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
|
int _dummy_tracepoint(void *arg)
|
||||||
{
|
{
|
||||||
return test_long_fname_1(arg);
|
return test_long_fname_1();
|
||||||
}
|
}
|
||||||
|
|
||||||
char _license[] SEC("license") = "GPL";
|
char _license[] SEC("license") = "GPL";
|
||||||
|
|
|
@ -17,20 +17,12 @@ struct bpf_map_def SEC("maps") btf_map = {
|
||||||
.max_entries = 4,
|
.max_entries = 4,
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dummy_tracepoint_args {
|
|
||||||
unsigned long long pad;
|
|
||||||
struct sock *sock;
|
|
||||||
};
|
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
int test_long_fname_2(void)
|
||||||
{
|
{
|
||||||
struct ipv_counts *counts;
|
struct ipv_counts *counts;
|
||||||
int key = 0;
|
int key = 0;
|
||||||
|
|
||||||
if (!arg->sock)
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
counts = bpf_map_lookup_elem(&btf_map, &key);
|
counts = bpf_map_lookup_elem(&btf_map, &key);
|
||||||
if (!counts)
|
if (!counts)
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -41,15 +33,15 @@ int test_long_fname_2(struct dummy_tracepoint_args *arg)
|
||||||
}
|
}
|
||||||
|
|
||||||
__attribute__((noinline))
|
__attribute__((noinline))
|
||||||
int test_long_fname_1(struct dummy_tracepoint_args *arg)
|
int test_long_fname_1(void)
|
||||||
{
|
{
|
||||||
return test_long_fname_2(arg);
|
return test_long_fname_2();
|
||||||
}
|
}
|
||||||
|
|
||||||
SEC("dummy_tracepoint")
|
SEC("dummy_tracepoint")
|
||||||
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
|
int _dummy_tracepoint(void *arg)
|
||||||
{
|
{
|
||||||
return test_long_fname_1(arg);
|
return test_long_fname_1();
|
||||||
}
|
}
|
||||||
|
|
||||||
char _license[] SEC("license") = "GPL";
|
char _license[] SEC("license") = "GPL";
|
||||||
|
|
|
@@ -2854,7 +2854,7 @@ static struct btf_raw_test raw_tests[] = {
 	.value_type_id = 1,
 	.max_entries = 4,
 	.btf_load_err = true,
-	.err_str = "vlen != 0",
+	.err_str = "Invalid func linkage",
 },
 
 {

@@ -315,3 +315,43 @@
 	},
 	.result = ACCEPT,
 },
+{
+	"store PTR_TO_STACK in R10 to array map using BPF_B",
+	.insns = {
+	/* Load pointer to map. */
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 2),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	/* Copy R10 to R9. */
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_10),
+	/* Pollute other registers with unaligned values. */
+	BPF_MOV64_IMM(BPF_REG_2, -1),
+	BPF_MOV64_IMM(BPF_REG_3, -1),
+	BPF_MOV64_IMM(BPF_REG_4, -1),
+	BPF_MOV64_IMM(BPF_REG_5, -1),
+	BPF_MOV64_IMM(BPF_REG_6, -1),
+	BPF_MOV64_IMM(BPF_REG_7, -1),
+	BPF_MOV64_IMM(BPF_REG_8, -1),
+	/* Store both R9 and R10 with BPF_B and read back. */
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, 0),
+	BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_1, 0),
+	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_9, 0),
+	BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_1, 0),
+	/* Should read back as same value. */
+	BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_3, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_IMM(BPF_REG_0, 42),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 3 },
+	.result = ACCEPT,
+	.retval = 42,
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},

@@ -88,6 +88,7 @@
 	BPF_EXIT_INSN(),
 	},
 	.fixup_map_hash_48b = { 3 },
+	.errstr_unpriv = "leaking pointer from stack off -8",
 	.errstr = "R0 invalid mem access 'inv'",
 	.result = REJECT,
 	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,