selftests/bpf: verifier/subreg converted to inline assembly

Test verifier/subreg automatically converted to use inline assembly.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-22-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Author:    Eduard Zingerman  2023-04-21 20:42:31 +03:00
Committer: Alexei Starovoitov
Parent:    f323a81806
Commit:    81d1d6dd40
3 changed files: 675 additions and 533 deletions
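For orientation: the conversion turns each struct bpf_test entry built from
insn macros into a __naked function holding the same program as BPF inline
assembly, annotated for test_loader. Both forms appear in full in the hunks
below; the first test, side by side:

	/* old style, tools/testing/selftests/bpf/verifier/subreg.c */
	{
	"add32 reg zero extend check",
	.insns = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
	BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 0,
	},

	/* new style, tools/testing/selftests/bpf/progs/verifier_subreg.c */
	SEC("socket")
	__description("add32 reg zero extend check")
	__success __success_unpriv __retval(0)
	__naked void add32_reg_zero_extend_check(void)
	{
		asm volatile ("				\
		call %[bpf_get_prandom_u32];		\
		r1 = r0;				\
		r0 = 0x100000000 ll;			\
		w0 += w1;				\
		r0 >>= 32;				\
		exit;					\
	"	:
		: __imm(bpf_get_prandom_u32)
		: __clobber_all);
	}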

tools/testing/selftests/bpf/prog_tests/verifier.c

@@ -54,6 +54,7 @@
#include "verifier_spill_fill.skel.h"
#include "verifier_spin_lock.skel.h"
#include "verifier_stack_ptr.skel.h"
#include "verifier_subreg.skel.h"
#include "verifier_uninit.skel.h"
#include "verifier_value_adj_spill.skel.h"
#include "verifier_value.skel.h"
@@ -147,6 +148,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); }
void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); }
void test_verifier_value(void) { RUN(verifier_value); }
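With the skeleton registered above, the converted tests run under test_progs
like any other test_loader-based verifier suite; e.g., assuming a built
selftests/bpf tree:

	./test_progs -t verifier_subreg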

tools/testing/selftests/bpf/progs/verifier_subreg.c

@@ -0,0 +1,673 @@
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* This file contains sub-register zero extension checks for insns defining
 * sub-registers, meaning:
 * - All insns under the BPF_ALU class. Their BPF_ALU32 variants or narrow
 *   width forms (BPF_END) can define sub-registers.
 * - Narrow direct loads, BPF_B/H/W | BPF_LDX.
 * - BPF_LD is not exposed to JIT back-ends, so there is no need to test it.
 *
 * "get_prandom_u32" is used to initialize the low 32 bits of some registers,
 * which prevents the verifier or JIT back-ends from optimizing a register
 * back into a constant when range info shows it holds a constant value.
 */
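/* Annotated sketch of the skeleton shared by the checks below (the way the
 * upper 32 bits of r0 are planted varies per test, and the "imm" variants
 * run two rounds and OR the results together via r6):
 *
 *	call %[bpf_get_prandom_u32];	// unknown low 32 bits, defeats folding
 *	...plant non-zero bits in the upper half of r0...
 *	w0 <op> ...;			// 32-bit op under test, must zero-extend
 *	r0 >>= 32;			// expose the upper half
 *	exit;				// retval must be 0
 */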
SEC("socket")
__description("add32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void add32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000000 ll; \
w0 += w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("add32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void add32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
/* An insn could have no effect on the low 32-bit, for example:\
* a = a + 0 \
* a = a | 0 \
* a = a & -1 \
* But, they should still zero high 32-bit. \
*/ \
w0 += 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 += -2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("sub32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void sub32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x1ffffffff ll; \
w0 -= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("sub32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void sub32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 -= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 -= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mul32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mul32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000001 ll; \
w0 *= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mul32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mul32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 *= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 *= -1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("div32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void div32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = -1; \
w0 /= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("div32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void div32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 /= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 /= 2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("or32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void or32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000001 ll; \
w0 |= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("or32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void or32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 |= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 |= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("and32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void and32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r1 |= r0; \
r0 = 0x1ffffffff ll; \
w0 &= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("and32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void and32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 &= -1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 &= -2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("lsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void lsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 <<= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("lsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void lsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 <<= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 <<= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("rsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void rsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 >>= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("rsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void rsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 >>= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 >>= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("neg32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void neg32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = -w0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mod32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mod32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = -1; \
w0 %%= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mod32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mod32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 %%= 1; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 %%= 2; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("xor32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void xor32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = r0; \
r0 = 0x100000000 ll; \
w0 ^= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("xor32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void xor32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 ^= 1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mov32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void mov32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x100000000 ll; \
r1 |= r0; \
r0 = 0x100000000 ll; \
w0 = w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("mov32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void mov32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 = 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("arsh32 reg zero extend check")
__success __success_unpriv __retval(0)
__naked void arsh32_reg_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r1 = 1; \
w0 s>>= w1; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("arsh32 imm zero extend check")
__success __success_unpriv __retval(0)
__naked void arsh32_imm_zero_extend_check(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 s>>= 0; \
r0 >>= 32; \
r6 = r0; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
w0 s>>= 1; \
r0 >>= 32; \
r0 |= r6; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end16 (to_le) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void le_reg_zero_extend_check_1(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = le16 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end32 (to_le) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void le_reg_zero_extend_check_2(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = le32 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end16 (to_be) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void be_reg_zero_extend_check_1(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = be16 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("end32 (to_be) reg zero extend check")
__success __success_unpriv __retval(0)
__naked void be_reg_zero_extend_check_2(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r6 = r0; \
r6 <<= 32; \
call %[bpf_get_prandom_u32]; \
r0 |= r6; \
r0 = be32 r0; \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_b zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_b_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u8*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_h zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_h_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u16*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
SEC("socket")
__description("ldx_w zero extend check")
__success __success_unpriv __retval(0)
__naked void ldx_w_zero_extend_check(void)
{
asm volatile (" \
r6 = r10; \
r6 += -4; \
r7 = 0xfaceb00c; \
*(u32*)(r6 + 0) = r7; \
call %[bpf_get_prandom_u32]; \
r1 = 0x1000000000 ll; \
r0 |= r1; \
r0 = *(u32*)(r6 + 0); \
r0 >>= 32; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/verifier/subreg.c

@@ -1,533 +0,0 @@
/* This file contains sub-register zero extension checks for insns defining
* sub-registers, meaning:
* - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
* forms (BPF_END) could define sub-registers.
* - Narrow direct loads, BPF_B/H/W | BPF_LDX.
* - BPF_LD is not exposed to JIT back-ends, so no need for testing.
*
* "get_prandom_u32" is used to initialize low 32-bit of some registers to
* prevent potential optimizations done by verifier or JIT back-ends which could
* optimize register back into constant when range info shows one register is a
* constant.
*/
{
"add32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"add32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
/* An insn could have no effect on the low 32-bit, for example:
* a = a + 0
* a = a | 0
* a = a & -1
* But, they should still zero high 32-bit.
*/
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"sub32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mul32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"div32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"or32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"and32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"lsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"rsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"neg32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_0, -1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mod32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"xor32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"mov32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 imm zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_le) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end16 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"end32 (to_be) reg zero extend check",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_b zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_h zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"ldx_w zero extend check",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},