From eeedd3527d80333fd6587a3db260395f05f312d0 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 15 Jan 2019 13:13:46 -0800 Subject: [PATCH 01/68] libbpf: don't define CC and AR We are already including tools/scripts/Makefile.include, which correctly handles CROSS_COMPILE, so there is no need to define our own vars. See related commit 7ed1c1901fe5 ("tools: fix cross-compile var clobbering") for more details. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/lib/bpf/Makefile | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 34d9c3619c96..847916273696 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -14,21 +14,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree))) #$(info Determined 'srctree' to be $(srctree)) endif -# Makefiles suck: This macro sets a default value of $(2) for the -# variable named by $(1), unless the variable has been set by -# environment or command line. This is necessary for CC and AR -# because make sets default values, so the simpler ?= approach -# won't work as expected. -define allow-override - $(if $(or $(findstring environment,$(origin $(1))),\ - $(findstring command line,$(origin $(1)))),,\ - $(eval $(1) = $(2))) endef - -# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix. -$(call allow-override,CC,$(CROSS_COMPILE)gcc) -$(call allow-override,AR,$(CROSS_COMPILE)ar) - INSTALL = install # Use DESTDIR for installing into a different root directory. @@ -54,7 +39,7 @@ man_dir_SQ = '$(subst ','\'',$(man_dir))' export man_dir man_dir_SQ INSTALL export DESTDIR DESTDIR_SQ -include ../../scripts/Makefile.include +include $(srctree)/tools/scripts/Makefile.include # copy a bit from Linux kbuild From b1e8818cabf407a5a2cec696411b0bdfd7fd12f0 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Jan 2019 17:07:47 -0800 Subject: [PATCH 02/68] bpf: btf: support 128 bit integer type Currently, btf only supports integers up to 64 bits. On the other hand, 128-bit support in gcc and clang has existed for a long time. For example, both gcc 4.8 and llvm 3.7 support the types "__int128" and "unsigned __int128" for virtually all 64-bit architectures including bpf. The requirement for __int128 support comes from two areas: . a bpf program may use __int128. For example, some bcc tools (https://github.com/iovisor/bcc/tree/master/tools), mostly tcp v6 related, tcpstates.py, tcpaccept.py, etc., are using __int128 to represent ipv6 addresses. . linux itself is using __int128 types. Hence, supporting the __int128 type in BTF is required for vmlinux BTF, which will be used by "compile once and run everywhere" and other projects. For 128-bit integers, hex numbers are pretty printed instead of base-10, since large decimal numbers are hard to decipher, e.g., for ipv6 addresses.
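For illustration, the same upper/lower split this patch performs in the kernel can be reproduced in plain user-space C. A minimal sketch, assuming a gcc/clang toolchain with __int128 support; print_int128_hex is a hypothetical helper, not code from this patch:

  #include <stdio.h>
  #include <stdint.h>

  /* Split a 128-bit value into two 64-bit halves and print it as hex,
   * mirroring the "0x%llx%016llx" format used below.
   */
  static void print_int128_hex(unsigned __int128 v)
  {
          uint64_t upper = (uint64_t)(v >> 64);
          uint64_t lower = (uint64_t)v;

          if (upper == 0)
                  printf("0x%llx\n", (unsigned long long)lower);
          else
                  printf("0x%llx%016llx\n", (unsigned long long)upper,
                         (unsigned long long)lower);
  }

  int main(void)
  {
          /* a value with bits set in both 64-bit halves */
          unsigned __int128 v = ((unsigned __int128)0x20010db8 << 96) | 1;

          print_int128_hex(v); /* prints 0x20010db8000000000000000000000001 */
          return 0;
  }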
Acked-by: Martin KaFai Lau Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- kernel/bpf/btf.c | 104 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 85 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a2f53642592b..022ef9ca1296 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -157,7 +157,7 @@ * */ -#define BITS_PER_U64 (sizeof(u64) * BITS_PER_BYTE) +#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2) #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) @@ -525,7 +525,7 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) /* * Regular int is not a bit field and it must be either - * u8/u16/u32/u64. + * u8/u16/u32/u64 or __int128. */ static bool btf_type_int_is_regular(const struct btf_type *t) { @@ -538,7 +538,8 @@ static bool btf_type_int_is_regular(const struct btf_type *t) if (BITS_PER_BYTE_MASKED(nr_bits) || BTF_INT_OFFSET(int_data) || (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) && - nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64))) { + nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) && + nr_bytes != (2 * sizeof(u64)))) { return false; } @@ -1063,9 +1064,9 @@ static int btf_int_check_member(struct btf_verifier_env *env, nr_copy_bits = BTF_INT_BITS(int_data) + BITS_PER_BYTE_MASKED(struct_bits_off); - if (nr_copy_bits > BITS_PER_U64) { + if (nr_copy_bits > BITS_PER_U128) { btf_verifier_log_member(env, struct_type, member, - "nr_copy_bits exceeds 64"); + "nr_copy_bits exceeds 128"); return -EINVAL; } @@ -1119,9 +1120,9 @@ static int btf_int_check_kflag_member(struct btf_verifier_env *env, bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); - if (nr_copy_bits > BITS_PER_U64) { + if (nr_copy_bits > BITS_PER_U128) { btf_verifier_log_member(env, struct_type, member, - "nr_copy_bits exceeds 64"); + "nr_copy_bits exceeds 128"); return -EINVAL; } @@ -1168,9 +1169,9 @@ static s32 btf_int_check_meta(struct btf_verifier_env *env, nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); - if (nr_bits > BITS_PER_U64) { + if (nr_bits > BITS_PER_U128) { btf_verifier_log_type(env, t, "nr_bits exceeds %zu", - BITS_PER_U64); + BITS_PER_U128); return -EINVAL; } @@ -1211,31 +1212,93 @@ static void btf_int_log(struct btf_verifier_env *env, btf_int_encoding_str(BTF_INT_ENCODING(int_data))); } +static void btf_int128_print(struct seq_file *m, void *data) +{ + /* data points to a __int128 number. 
+ * Suppose + * int128_num = *(__int128 *)data; + * The below formulas shows what upper_num and lower_num represents: + * upper_num = int128_num >> 64; + * lower_num = int128_num & 0xffffffffFFFFFFFFULL; + */ + u64 upper_num, lower_num; + +#ifdef __BIG_ENDIAN_BITFIELD + upper_num = *(u64 *)data; + lower_num = *(u64 *)(data + 8); +#else + upper_num = *(u64 *)(data + 8); + lower_num = *(u64 *)data; +#endif + if (upper_num == 0) + seq_printf(m, "0x%llx", lower_num); + else + seq_printf(m, "0x%llx%016llx", upper_num, lower_num); +} + +static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, + u16 right_shift_bits) +{ + u64 upper_num, lower_num; + +#ifdef __BIG_ENDIAN_BITFIELD + upper_num = print_num[0]; + lower_num = print_num[1]; +#else + upper_num = print_num[1]; + lower_num = print_num[0]; +#endif + + /* shake out un-needed bits by shift/or operations */ + if (left_shift_bits >= 64) { + upper_num = lower_num << (left_shift_bits - 64); + lower_num = 0; + } else { + upper_num = (upper_num << left_shift_bits) | + (lower_num >> (64 - left_shift_bits)); + lower_num = lower_num << left_shift_bits; + } + + if (right_shift_bits >= 64) { + lower_num = upper_num >> (right_shift_bits - 64); + upper_num = 0; + } else { + lower_num = (lower_num >> right_shift_bits) | + (upper_num << (64 - right_shift_bits)); + upper_num = upper_num >> right_shift_bits; + } + +#ifdef __BIG_ENDIAN_BITFIELD + print_num[0] = upper_num; + print_num[1] = lower_num; +#else + print_num[0] = lower_num; + print_num[1] = upper_num; +#endif +} + static void btf_bitfield_seq_show(void *data, u8 bits_offset, u8 nr_bits, struct seq_file *m) { u16 left_shift_bits, right_shift_bits; u8 nr_copy_bytes; u8 nr_copy_bits; - u64 print_num; + u64 print_num[2] = {}; nr_copy_bits = nr_bits + bits_offset; nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); - print_num = 0; - memcpy(&print_num, data, nr_copy_bytes); + memcpy(print_num, data, nr_copy_bytes); #ifdef __BIG_ENDIAN_BITFIELD left_shift_bits = bits_offset; #else - left_shift_bits = BITS_PER_U64 - nr_copy_bits; + left_shift_bits = BITS_PER_U128 - nr_copy_bits; #endif - right_shift_bits = BITS_PER_U64 - nr_bits; + right_shift_bits = BITS_PER_U128 - nr_bits; - print_num <<= left_shift_bits; - print_num >>= right_shift_bits; - - seq_printf(m, "0x%llx", print_num); + btf_int128_shift(print_num, left_shift_bits, right_shift_bits); + btf_int128_print(m, print_num); } @@ -1250,7 +1313,7 @@ static void btf_int_bits_seq_show(const struct btf *btf, /* * bits_offset is at most 7. - * BTF_INT_OFFSET() cannot exceed 64 bits. + * BTF_INT_OFFSET() cannot exceed 128 bits. */ total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); data += BITS_ROUNDDOWN_BYTES(total_bits_offset); @@ -1274,6 +1337,9 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, } switch (nr_bits) { + case 128: + btf_int128_print(m, data); + break; case 64: if (sign) seq_printf(m, "%lld", *(s64 *)data); From a80eba20ed596198f0a179466f2944612bcab320 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Jan 2019 17:07:48 -0800 Subject: [PATCH 03/68] tools/bpf: add int128 raw test in test_btf Several int128 raw type tests are added to test_btf. Currently these tests are enabled only for x64 and arm64 for which kernel has CONFIG_ARCH_SUPPORTS_INT128 set. $ test_btf ...... 
BTF raw test[106] (128-bit int): OK BTF raw test[107] (struct, 128-bit int member): OK BTF raw test[108] (struct, 120-bit int member bitfield): OK BTF raw test[109] (struct, kind_flag, 128-bit int member): OK BTF raw test[110] (struct, kind_flag, 120-bit int member bitfield): OK ...... Acked-by: Martin KaFai Lau Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_btf.c | 93 ++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index a0bd04befe87..69f505806f89 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -2707,6 +2707,99 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid member offset", }, +{ + .descr = "128-bit int", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "int_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, 128-bit int member", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, 120-bit int member bitfield", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 120, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, 0), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, kind_flag, 128-bit int member", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + +{ + .descr = "struct, kind_flag, 120-bit int member bitfield", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 128, 16), /* [2] */ + BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 1), 16), /* [3] */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(120, 0)), + BTF_END_RAW, + }, + BTF_STR_SEC("\0A"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "struct_type_check_btf", + .key_size = sizeof(int), + .value_size = sizeof(int), + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 4, +}, + }; /* struct btf_raw_test raw_tests[] */ static const char *get_next_str(const char *start, const char *end) From ce6ec47a10199d5504dc40a4f0069ef1c4878a4a Mon 
Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Jan 2019 17:07:49 -0800 Subject: [PATCH 04/68] tools/bpf: refactor test_btf pretty printing for multiple map value formats The test_btf pretty print is refactored in order to easily support multiple map value formats. The next patch will add __int128 type tests which needs macro guard __SIZEOF_INT128__. There is no functionality change with this patch. Acked-by: Martin KaFai Lau Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_btf.c | 112 +++++++++++++++++-------- 1 file changed, 75 insertions(+), 37 deletions(-) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 69f505806f89..02b00c952255 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -134,6 +135,11 @@ static struct btf_header hdr_tmpl = { .hdr_len = sizeof(struct btf_header), }; +/* several different mapv kinds(types) supported by pprint */ +enum pprint_mapv_kind_t { + PPRINT_MAPV_KIND_BASIC = 0, +}; + struct btf_raw_test { const char *descr; const char *str_sec; @@ -156,6 +162,7 @@ struct btf_raw_test { int type_off_delta; int str_off_delta; int str_len_delta; + enum pprint_mapv_kind_t mapv_kind; }; #define BTF_STR_SEC(str) \ @@ -3880,26 +3887,71 @@ static struct btf_pprint_test_meta { }; +static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind) +{ + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) + return sizeof(struct pprint_mapv); -static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i, + assert(0); +} + +static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind, + void *mapv, uint32_t i, int num_cpus, int rounded_value_size) { int cpu; - for (cpu = 0; cpu < num_cpus; cpu++) { - v->ui32 = i + cpu; - v->si32 = -i; - v->unused_bits2a = 3; - v->bits28 = i; - v->unused_bits2b = 3; - v->ui64 = i; - v->aenum = i & 0x03; - v->ui32b = 4; - v->bits2c = 1; - v = (void *)v + rounded_value_size; + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) { + struct pprint_mapv *v = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + v->ui32 = i + cpu; + v->si32 = -i; + v->unused_bits2a = 3; + v->bits28 = i; + v->unused_bits2b = 3; + v->ui64 = i; + v->aenum = i & 0x03; + v->ui32b = 4; + v->bits2c = 1; + v = (void *)v + rounded_value_size; + } } } +ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, + char *expected_line, ssize_t line_size, + bool percpu_map, unsigned int next_key, + int cpu, void *mapv) +{ + ssize_t nexpected_line = -1; + + if (mapv_kind == PPRINT_MAPV_KIND_BASIC) { + struct pprint_mapv *v = mapv; + + nexpected_line = snprintf(expected_line, line_size, + "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," + "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," + "%u,0x%x}\n", + percpu_map ? "\tcpu" : "", + percpu_map ? 
cpu : next_key, + v->ui32, v->si32, + v->unused_bits2a, + v->bits28, + v->unused_bits2b, + v->ui64, + v->ui8a[0], v->ui8a[1], + v->ui8a[2], v->ui8a[3], + v->ui8a[4], v->ui8a[5], + v->ui8a[6], v->ui8a[7], + pprint_enum_str[v->aenum], + v->ui32b, + v->bits2c); + } + + return nexpected_line; +} + static int check_line(const char *expected_line, int nexpected_line, int expected_line_len, const char *line) { @@ -3921,10 +3973,10 @@ static int check_line(const char *expected_line, int nexpected_line, static int do_test_pprint(int test_num) { const struct btf_raw_test *test = &pprint_test_template[test_num]; + enum pprint_mapv_kind_t mapv_kind = test->mapv_kind; struct bpf_create_map_attr create_attr = {}; bool ordered_map, lossless_map, percpu_map; int err, ret, num_cpus, rounded_value_size; - struct pprint_mapv *mapv = NULL; unsigned int key, nr_read_elems; int map_fd = -1, btf_fd = -1; unsigned int raw_btf_size; @@ -3933,6 +3985,7 @@ static int do_test_pprint(int test_num) char pin_path[255]; size_t line_len = 0; char *line = NULL; + void *mapv = NULL; uint8_t *raw_btf; ssize_t nread; @@ -3985,7 +4038,7 @@ static int do_test_pprint(int test_num) percpu_map = test->percpu_map; num_cpus = percpu_map ? bpf_num_possible_cpus() : 1; - rounded_value_size = round_up(sizeof(struct pprint_mapv), 8); + rounded_value_size = round_up(get_pprint_mapv_size(mapv_kind), 8); mapv = calloc(num_cpus, rounded_value_size); if (CHECK(!mapv, "mapv allocation failure")) { err = -1; @@ -3993,7 +4046,7 @@ static int do_test_pprint(int test_num) } for (key = 0; key < test->max_entries; key++) { - set_pprint_mapv(mapv, key, num_cpus, rounded_value_size); + set_pprint_mapv(mapv_kind, mapv, key, num_cpus, rounded_value_size); bpf_map_update_elem(map_fd, &key, mapv, 0); } @@ -4017,13 +4070,13 @@ static int do_test_pprint(int test_num) ordered_map = test->ordered_map; lossless_map = test->lossless_map; do { - struct pprint_mapv *cmapv; ssize_t nexpected_line; unsigned int next_key; + void *cmapv; int cpu; next_key = ordered_map ? nr_read_elems : atoi(line); - set_pprint_mapv(mapv, next_key, num_cpus, rounded_value_size); + set_pprint_mapv(mapv_kind, mapv, next_key, num_cpus, rounded_value_size); cmapv = mapv; for (cpu = 0; cpu < num_cpus; cpu++) { @@ -4056,31 +4109,16 @@ static int do_test_pprint(int test_num) break; } - nexpected_line = snprintf(expected_line, sizeof(expected_line), - "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," - "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," - "%u,0x%x}\n", - percpu_map ? "\tcpu" : "", - percpu_map ? cpu : next_key, - cmapv->ui32, cmapv->si32, - cmapv->unused_bits2a, - cmapv->bits28, - cmapv->unused_bits2b, - cmapv->ui64, - cmapv->ui8a[0], cmapv->ui8a[1], - cmapv->ui8a[2], cmapv->ui8a[3], - cmapv->ui8a[4], cmapv->ui8a[5], - cmapv->ui8a[6], cmapv->ui8a[7], - pprint_enum_str[cmapv->aenum], - cmapv->ui32b, - cmapv->bits2c); - + nexpected_line = get_pprint_expected_line(mapv_kind, expected_line, + sizeof(expected_line), + percpu_map, next_key, + cpu, cmapv); err = check_line(expected_line, nexpected_line, sizeof(expected_line), line); if (err == -1) goto done; - cmapv = (void *)cmapv + rounded_value_size; + cmapv = cmapv + rounded_value_size; } if (percpu_map) { From 4df3a1d0a5bffd77a9c7b00ac687f4493e57a3ce Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Jan 2019 17:07:50 -0800 Subject: [PATCH 05/68] tools/bpf: add bpffs pretty print test for int128 The bpffs pretty print test is extended to cover int128 types. Tested on an x64 machine. $ test_btf -p ...... 
BTF pretty print array(#3)......OK PASS:9 SKIP:0 FAIL:0 Acked-by: Martin KaFai Lau Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_btf.c | 79 ++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 02b00c952255..bfd92182533b 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -138,6 +138,7 @@ static struct btf_header hdr_tmpl = { /* several different mapv kinds(types) supported by pprint */ enum pprint_mapv_kind_t { PPRINT_MAPV_KIND_BASIC = 0, + PPRINT_MAPV_KIND_INT128, }; struct btf_raw_test { @@ -3630,6 +3631,16 @@ struct pprint_mapv { uint32_t bits2c:2; }; +#ifdef __SIZEOF_INT128__ +struct pprint_mapv_int128 { + __int128 si128a; + __int128 si128b; + unsigned __int128 bits3:3; + unsigned __int128 bits80:80; + unsigned __int128 ui128; +}; +#endif + static struct btf_raw_test pprint_test_template[] = { { .raw_types = { @@ -3821,6 +3832,35 @@ static struct btf_raw_test pprint_test_template[] = { .max_entries = 128 * 1024, }, +#ifdef __SIZEOF_INT128__ +{ + /* test int128 */ + .raw_types = { + /* unsigned int */ /* [1] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), + /* __int128 */ /* [2] */ + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 128, 16), + /* unsigned __int128 */ /* [3] */ + BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 128, 16), + /* struct pprint_mapv_int128 */ /* [4] */ + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 5), 64), + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)), /* si128a */ + BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 128)), /* si128b */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(3, 256)), /* bits3 */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(80, 259)), /* bits80 */ + BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 384)), /* ui128 */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0unsigned int\0__int128\0unsigned __int128\0pprint_mapv_int128\0si128a\0si128b\0bits3\0bits80\0ui128"), + .key_size = sizeof(unsigned int), + .value_size = sizeof(struct pprint_mapv_int128), + .key_type_id = 1, + .value_type_id = 4, + .max_entries = 128 * 1024, + .mapv_kind = PPRINT_MAPV_KIND_INT128, +}, +#endif + }; static struct btf_pprint_test_meta { @@ -3892,6 +3932,11 @@ static size_t get_pprint_mapv_size(enum pprint_mapv_kind_t mapv_kind) if (mapv_kind == PPRINT_MAPV_KIND_BASIC) return sizeof(struct pprint_mapv); +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) + return sizeof(struct pprint_mapv_int128); +#endif + assert(0); } @@ -3917,6 +3962,21 @@ static void set_pprint_mapv(enum pprint_mapv_kind_t mapv_kind, v = (void *)v + rounded_value_size; } } + +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) { + struct pprint_mapv_int128 *v = mapv; + + for (cpu = 0; cpu < num_cpus; cpu++) { + v->si128a = i; + v->si128b = -i; + v->bits3 = i & 0x07; + v->bits80 = (((unsigned __int128)1) << 64) + i; + v->ui128 = (((unsigned __int128)2) << 64) + i; + v = (void *)v + rounded_value_size; + } + } +#endif } ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, @@ -3949,6 +4009,25 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, v->bits2c); } +#ifdef __SIZEOF_INT128__ + if (mapv_kind == PPRINT_MAPV_KIND_INT128) { + struct pprint_mapv_int128 *v = mapv; + + nexpected_line = snprintf(expected_line, line_size, + "%s%u: {0x%lx,0x%lx,0x%lx," + "0x%lx%016lx,0x%lx%016lx}\n", + percpu_map ? "\tcpu" : "", + percpu_map ? 
cpu : next_key, + (uint64_t)v->si128a, + (uint64_t)v->si128b, + (uint64_t)v->bits3, + (uint64_t)(v->bits80 >> 64), + (uint64_t)v->bits80, + (uint64_t)(v->ui128 >> 64), + (uint64_t)v->ui128); + } +#endif + return nexpected_line; } From e86e513854615b4222fcd5364e91b7bf707dac9a Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 15 Jan 2019 17:07:52 -0800 Subject: [PATCH 06/68] tools/bpf: support __int128 in bpftool map pretty dumper For formatted output, a decimal number is currently required when json is enabled. Similar to the kernel bpffs printout, for int128 numbers only hex numbers are dumped, quoted as strings. Below is an example showing plain and json pretty print, based on the map in the test_btf pretty print test. $ bpftool m s 75: hash name pprint_test_has flags 0x0 key 4B value 112B max_entries 4 memlock 4096B $ bpftool m d id 75 ...... { "key": 3, "value": { "ui32": 3, "ui16": 0, "si32": -3, "unused_bits2a": 0x3, "bits28": 0x3, "unused_bits2b": 0x3, "": { "ui64": 3, "ui8a": [3,0,0,0,0,0,0,0 ] }, "aenum": 3, "ui32b": 4, "bits2c": 0x1, "si128a": 0x3, "si128b": 0xfffffffd, "bits3": 0x3, "bits80": 0x10000000000000003, "ui128": 0x20000000000000003 } }, ...... $ bpftool -p -j m d id 75 ...... { "key": ["0x03","0x00","0x00","0x00" ], "value": ["0x03","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0xfd","0xff","0xff","0xff","0x0f","0x00","0x00","0xc0", "0x03","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x03","0x00","0x00","0x00","0x04","0x00","0x00","0x00", "0x01","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x00","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x03","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x00","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0xfd","0xff","0xff","0xff","0x00","0x00","0x00","0x00", "0x00","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x1b","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x08","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x03","0x00","0x00","0x00","0x00","0x00","0x00","0x00", "0x02","0x00","0x00","0x00","0x00","0x00","0x00","0x00" ], "formatted": { "key": 3, "value": { "ui32": 3, "ui16": 0, "si32": -3, "unused_bits2a": "0x3", "bits28": "0x3", "unused_bits2b": "0x3", "": { "ui64": 3, "ui8a": [3,0,0,0,0,0,0,0 ] }, "aenum": 3, "ui32b": 4, "bits2c": "0x1", "si128a": "0x3", "si128b": "0xfffffffd", "bits3": "0x3", "bits80": "0x10000000000000003", "ui128": "0x20000000000000003" } } } ...... Acked-by: Martin KaFai Lau Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/btf_dumper.c | 98 +++++++++++++++++++++++++++++----- 1 file changed, 86 insertions(+), 12 deletions(-) diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 6ba5f567a9d8..e63bce0755eb 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -73,35 +73,104 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id, return ret; } +static void btf_int128_print(json_writer_t *jw, const void *data, + bool is_plain_text) +{ + /* data points to a __int128 number.
+ * Suppose + * int128_num = *(__int128 *)data; + * The below formulas shows what upper_num and lower_num represents: + * upper_num = int128_num >> 64; + * lower_num = int128_num & 0xffffffffFFFFFFFFULL; + */ + __u64 upper_num, lower_num; + +#ifdef __BIG_ENDIAN_BITFIELD + upper_num = *(__u64 *)data; + lower_num = *(__u64 *)(data + 8); +#else + upper_num = *(__u64 *)(data + 8); + lower_num = *(__u64 *)data; +#endif + + if (is_plain_text) { + if (upper_num == 0) + jsonw_printf(jw, "0x%llx", lower_num); + else + jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num); + } else { + if (upper_num == 0) + jsonw_printf(jw, "\"0x%llx\"", lower_num); + else + jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num); + } +} + +static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits, + u16 right_shift_bits) +{ + __u64 upper_num, lower_num; + +#ifdef __BIG_ENDIAN_BITFIELD + upper_num = print_num[0]; + lower_num = print_num[1]; +#else + upper_num = print_num[1]; + lower_num = print_num[0]; +#endif + + /* shake out un-needed bits by shift/or operations */ + if (left_shift_bits >= 64) { + upper_num = lower_num << (left_shift_bits - 64); + lower_num = 0; + } else { + upper_num = (upper_num << left_shift_bits) | + (lower_num >> (64 - left_shift_bits)); + lower_num = lower_num << left_shift_bits; + } + + if (right_shift_bits >= 64) { + lower_num = upper_num >> (right_shift_bits - 64); + upper_num = 0; + } else { + lower_num = (lower_num >> right_shift_bits) | + (upper_num << (64 - right_shift_bits)); + upper_num = upper_num >> right_shift_bits; + } + +#ifdef __BIG_ENDIAN_BITFIELD + print_num[0] = upper_num; + print_num[1] = lower_num; +#else + print_num[0] = lower_num; + print_num[1] = upper_num; +#endif +} + static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset, const void *data, json_writer_t *jw, bool is_plain_text) { int left_shift_bits, right_shift_bits; + __u64 print_num[2] = {}; int bytes_to_copy; int bits_to_copy; - __u64 print_num; bits_to_copy = bit_offset + nr_bits; bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); - print_num = 0; - memcpy(&print_num, data, bytes_to_copy); + memcpy(print_num, data, bytes_to_copy); #if defined(__BIG_ENDIAN_BITFIELD) left_shift_bits = bit_offset; #elif defined(__LITTLE_ENDIAN_BITFIELD) - left_shift_bits = 64 - bits_to_copy; + left_shift_bits = 128 - bits_to_copy; #else #error neither big nor little endian #endif - right_shift_bits = 64 - nr_bits; + right_shift_bits = 128 - nr_bits; - print_num <<= left_shift_bits; - print_num >>= right_shift_bits; - if (is_plain_text) - jsonw_printf(jw, "0x%llx", print_num); - else - jsonw_printf(jw, "%llu", print_num); + btf_int128_shift(print_num, left_shift_bits, right_shift_bits); + btf_int128_print(jw, print_num, is_plain_text); } @@ -113,7 +182,7 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset, int total_bits_offset; /* bits_offset is at most 7. - * BTF_INT_OFFSET() cannot exceed 64 bits. + * BTF_INT_OFFSET() cannot exceed 128 bits. 
*/ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); data += BITS_ROUNDDOWN_BYTES(total_bits_offset); @@ -139,6 +208,11 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset, return 0; } + if (nr_bits == 128) { + btf_int128_print(jw, data, is_plain_text); + return 0; + } + switch (BTF_INT_ENCODING(*int_type)) { case 0: if (BTF_INT_BITS(*int_type) == 64) From 7d7209cb9afdfc3dfcdc1b45013e62afb1ad71b6 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:09:59 -0800 Subject: [PATCH 07/68] bpftool: make key and value optional in update command Bpftool expects both key and value for 'update' operations. For some map types, key should not be specified. Support updating those map types. Before: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 Error: did not find key After: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- .../bpf/bpftool/Documentation/bpftool-map.rst | 4 +-- tools/bpf/bpftool/map.c | 36 +++++++++++++++---- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 64b001b4f777..1f1dfe06e66d 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -25,7 +25,7 @@ MAP COMMANDS | **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \ | **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*] | **bpftool** **map dump** *MAP* -| **bpftool** **map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*] +| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] | **bpftool** **map lookup** *MAP* **key** *DATA* | **bpftool** **map getnext** *MAP* [**key** *DATA*] | **bpftool** **map delete** *MAP* **key** *DATA* @@ -62,7 +62,7 @@ DESCRIPTION **bpftool map dump** *MAP* Dump all entries in a given *MAP*. - **bpftool map update** *MAP* **key** *DATA* **value** *VALUE* [*UPDATE_FLAGS*] + **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] Update map entry for a given *KEY*. 
*UPDATE_FLAGS* can be one of: **any** update existing entry diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 2037e3dc864b..61453a1ba6e1 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -779,6 +779,32 @@ exit_free: return err; } +static int alloc_key_value(struct bpf_map_info *info, void **key, void **value) +{ + *key = NULL; + *value = NULL; + + if (info->key_size) { + *key = malloc(info->key_size); + if (!*key) { + p_err("key mem alloc failed"); + return -1; + } + } + + if (info->value_size) { + *value = alloc_value(info); + if (!*value) { + p_err("value mem alloc failed"); + free(*key); + *key = NULL; + return -1; + } + } + + return 0; +} + static int do_update(int argc, char **argv) { struct bpf_map_info info = {}; @@ -795,13 +821,9 @@ static int do_update(int argc, char **argv) if (fd < 0) return -1; - key = malloc(info.key_size); - value = alloc_value(&info); - if (!key || !value) { - p_err("mem alloc failed"); - err = -1; + err = alloc_key_value(&info, &key, &value); + if (err) goto exit_free; - } err = parse_elem(argv, &info, key, value, info.key_size, info.value_size, &flags, &value_fd); @@ -1135,7 +1157,7 @@ static int do_help(int argc, char **argv) " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n" " [dev NAME]\n" " %s %s dump MAP\n" - " %s %s update MAP key DATA value VALUE [UPDATE_FLAGS]\n" + " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n" " %s %s lookup MAP key DATA\n" " %s %s getnext MAP [key DATA]\n" " %s %s delete MAP key DATA\n" From 8a89fff60a7ef63a23b112bdfa76d7ba160a4a68 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:00 -0800 Subject: [PATCH 08/68] bpftool: make key optional in lookup command Bpftool expects key for 'lookup' operations. For some map types, key should not be specified. Support looking up those map types. Before: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map lookup pinned /sys/fs/bpf/q Error: did not find key After: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map lookup pinned /sys/fs/bpf/q key: value: 00 01 02 03 Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Documentation/bpftool-map.rst | 4 ++-- tools/bpf/bpftool/map.c | 10 +++------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 1f1dfe06e66d..f34cace771bd 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -26,7 +26,7 @@ MAP COMMANDS | **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*] | **bpftool** **map dump** *MAP* | **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*] -| **bpftool** **map lookup** *MAP* **key** *DATA* +| **bpftool** **map lookup** *MAP* [**key** *DATA*] | **bpftool** **map getnext** *MAP* [**key** *DATA*] | **bpftool** **map delete** *MAP* **key** *DATA* | **bpftool** **map pin** *MAP* *FILE* @@ -75,7 +75,7 @@ DESCRIPTION the bytes are parsed as decimal values, unless a "0x" prefix (for hexadecimal) or a "0" prefix (for octal) is provided. - **bpftool map lookup** *MAP* **key** *DATA* + **bpftool map lookup** *MAP* [**key** *DATA*] Lookup **key** in the map. 
**bpftool map getnext** *MAP* [**key** *DATA*] diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 61453a1ba6e1..4256842f9664 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -865,13 +865,9 @@ static int do_lookup(int argc, char **argv) if (fd < 0) return -1; - key = malloc(info.key_size); - value = alloc_value(&info); - if (!key || !value) { - p_err("mem alloc failed"); - err = -1; + err = alloc_key_value(&info, &key, &value); + if (err) goto exit_free; - } err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL); if (err) @@ -1158,7 +1154,7 @@ static int do_help(int argc, char **argv) " [dev NAME]\n" " %s %s dump MAP\n" " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n" - " %s %s lookup MAP key DATA\n" + " %s %s lookup MAP [key DATA]\n" " %s %s getnext MAP [key DATA]\n" " %s %s delete MAP key DATA\n" " %s %s pin MAP FILE\n" From 04a5d323e6b90856b479be7a2b08d4cba6b720b6 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:01 -0800 Subject: [PATCH 09/68] bpftool: don't print empty key/value for maps When doing dump or lookup, don't print key if key_size == 0 or value if value_size == 0. The initial usecase is queue and stack, where we have only values. This is for regular output only, json still has all the fields. Before: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map lookup pinned /sys/fs/bpf/q key: value: 00 01 02 03 After: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map lookup pinned /sys/fs/bpf/q value: 00 01 02 03 Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 47 ++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 4256842f9664..3f599399913b 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -285,16 +285,21 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key, single_line = info->key_size + info->value_size <= 24 && !break_names; - printf("key:%c", break_names ? '\n' : ' '); - fprint_hex(stdout, key, info->key_size, " "); + if (info->key_size) { + printf("key:%c", break_names ? '\n' : ' '); + fprint_hex(stdout, key, info->key_size, " "); - printf(single_line ? " " : "\n"); + printf(single_line ? " " : "\n"); + } - printf("value:%c", break_names ? '\n' : ' '); - if (value) - fprint_hex(stdout, value, info->value_size, " "); - else - printf(""); + if (info->value_size) { + printf("value:%c", break_names ? '\n' : ' '); + if (value) + fprint_hex(stdout, value, info->value_size, + " "); + else + printf(""); + } printf("\n"); } else { @@ -303,19 +308,23 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key, n = get_possible_cpus(); step = round_up(info->value_size, 8); - printf("key:\n"); - fprint_hex(stdout, key, info->key_size, " "); - printf("\n"); - for (i = 0; i < n; i++) { - printf("value (CPU %02d):%c", - i, info->value_size > 16 ? '\n' : ' '); - if (value) - fprint_hex(stdout, value + i * step, - info->value_size, " "); - else - printf(""); + if (info->key_size) { + printf("key:\n"); + fprint_hex(stdout, key, info->key_size, " "); printf("\n"); } + if (info->value_size) { + for (i = 0; i < n; i++) { + printf("value (CPU %02d):%c", + i, info->value_size > 16 ? 
'\n' : ' '); + if (value) + fprint_hex(stdout, value + i * step, + info->value_size, " "); + else + printf(""); + printf("\n"); + } + } } } From 66cf6e0b1248e576468956cb33ad2dabc45163dc Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:02 -0800 Subject: [PATCH 10/68] bpftool: add peek command This is intended to be used with queues and stacks and be more user-friendly than 'lookup' without key/value. Example: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map update pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map peek pinned /sys/fs/bpf/q value: 00 01 02 03 Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Documentation/bpftool-map.rst | 4 ++++ tools/bpf/bpftool/map.c | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index f34cace771bd..b79da683da88 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -31,6 +31,7 @@ MAP COMMANDS | **bpftool** **map delete** *MAP* **key** *DATA* | **bpftool** **map pin** *MAP* *FILE* | **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] +| **bpftool** **map peek** *MAP* | **bpftool** **map help** | | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } @@ -107,6 +108,9 @@ DESCRIPTION replace any existing ring. Any other application will stop receiving events if it installed its rings earlier. + **bpftool map peek** *MAP* + Peek next **value** in the queue or stack. + **bpftool map help** Print short help message. diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 3f599399913b..d7344fcd2089 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -1168,6 +1168,7 @@ static int do_help(int argc, char **argv) " %s %s delete MAP key DATA\n" " %s %s pin MAP FILE\n" " %s %s event_pipe MAP [cpu N index M]\n" + " %s %s peek MAP\n" " %s %s help\n" "\n" " " HELP_SPEC_MAP "\n" @@ -1185,7 +1186,7 @@ static int do_help(int argc, char **argv) bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2]); + bin_name, argv[-2], bin_name, argv[-2]); return 0; } @@ -1202,6 +1203,7 @@ static const struct cmd cmds[] = { { "pin", do_pin }, { "event_pipe", do_event_pipe }, { "create", do_create }, + { "peek", do_lookup }, { 0 } }; From 549d4d3da7c4a75c25ea2726223ab96a1eeb135f Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:03 -0800 Subject: [PATCH 11/68] bpftool: add push and enqueue commands This is intended to be used with queues and stacks and be more user-friendly than 'update' without the key. 
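Both verbs are aliases for the same update operation. A rough user-space equivalent, sketched in libbpf terms (assuming libbpf's bpf/bpf.h and the pinned queue created in the example below; not code from this patch):

  #include <stdio.h>
  #include <bpf/bpf.h>

  int main(void)
  {
          unsigned int value = 0x03020100;
          int fd = bpf_obj_get("/sys/fs/bpf/q");

          if (fd < 0)
                  return 1;
          /* queue/stack maps have key_size == 0, so the key is NULL */
          if (bpf_map_update_elem(fd, NULL, &value, 0))
                  perror("push");
          return 0;
  }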
Example: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map push pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map peek pinned /sys/fs/bpf/q value: 00 01 02 03 bpftool map create /sys/fs/bpf/s type stack value 4 entries 10 name s bpftool map enqueue pinned /sys/fs/bpf/s value 0 1 2 3 bpftool map peek pinned /sys/fs/bpf/s value: 00 01 02 03 Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/Documentation/bpftool-map.rst | 8 ++++++++ tools/bpf/bpftool/map.c | 7 ++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index b79da683da88..435a79d2eed5 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -32,6 +32,8 @@ MAP COMMANDS | **bpftool** **map pin** *MAP* *FILE* | **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] | **bpftool** **map peek** *MAP* +| **bpftool** **map push** *MAP* **value** *VALUE* +| **bpftool** **map enqueue** *MAP* **value** *VALUE* | **bpftool** **map help** | | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } @@ -111,6 +113,12 @@ DESCRIPTION **bpftool map peek** *MAP* Peek next **value** in the queue or stack. + **bpftool map push** *MAP* **value** *VALUE* + Push **value** onto the stack. + + **bpftool map enqueue** *MAP* **value** *VALUE* + Enqueue **value** into the queue. + **bpftool map help** Print short help message. diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index d7344fcd2089..6b5fcbe2d9d4 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -1169,6 +1169,8 @@ static int do_help(int argc, char **argv) " %s %s pin MAP FILE\n" " %s %s event_pipe MAP [cpu N index M]\n" " %s %s peek MAP\n" + " %s %s push MAP value VALUE\n" + " %s %s enqueue MAP value VALUE\n" " %s %s help\n" "\n" " " HELP_SPEC_MAP "\n" @@ -1186,7 +1188,8 @@ static int do_help(int argc, char **argv) bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2], bin_name, argv[-2]); + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], + bin_name, argv[-2]); return 0; } @@ -1204,6 +1207,8 @@ static const struct cmd cmds[] = { { "event_pipe", do_event_pipe }, { "create", do_create }, { "peek", do_lookup }, + { "push", do_update }, + { "enqueue", do_update }, { 0 } }; From 74f312ef847f006aadbe5061eeefe375200e3207 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:04 -0800 Subject: [PATCH 12/68] bpftool: add pop and dequeue commands This is intended to be used with queues and stacks, it pops and prints the last element via bpf_map_lookup_and_delete_elem. 
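A rough user-space equivalent of the pop operation, sketched with libbpf (assuming bpf/bpf.h and the pinned queue from the example below; not code from this patch):

  #include <stdio.h>
  #include <bpf/bpf.h>

  int main(void)
  {
          unsigned int value;
          int fd = bpf_obj_get("/sys/fs/bpf/q");

          if (fd < 0)
                  return 1;
          /* NULL key, as for all queue/stack operations; ENOENT from
           * the kernel means the map is empty.
           */
          if (bpf_map_lookup_and_delete_elem(fd, NULL, &value))
                  perror("pop");
          else
                  printf("value: %08x\n", value);
          return 0;
  }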
Example: bpftool map create /sys/fs/bpf/q type queue value 4 entries 10 name q bpftool map push pinned /sys/fs/bpf/q value 0 1 2 3 bpftool map pop pinned /sys/fs/bpf/q value: 00 01 02 03 bpftool map pop pinned /sys/fs/bpf/q Error: empty map bpftool map create /sys/fs/bpf/s type stack value 4 entries 10 name s bpftool map enqueue pinned /sys/fs/bpf/s value 0 1 2 3 bpftool map dequeue pinned /sys/fs/bpf/s value: 00 01 02 03 bpftool map dequeue pinned /sys/fs/bpf/s Error: empty map Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- .../bpf/bpftool/Documentation/bpftool-map.rst | 8 ++ tools/bpf/bpftool/map.c | 125 +++++++++++++----- 2 files changed, 98 insertions(+), 35 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 435a79d2eed5..0584c31d3fe2 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -33,7 +33,9 @@ MAP COMMANDS | **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*] | **bpftool** **map peek** *MAP* | **bpftool** **map push** *MAP* **value** *VALUE* +| **bpftool** **map pop** *MAP* | **bpftool** **map enqueue** *MAP* **value** *VALUE* +| **bpftool** **map dequeue** *MAP* | **bpftool** **map help** | | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } @@ -116,9 +118,15 @@ DESCRIPTION **bpftool map push** *MAP* **value** *VALUE* Push **value** onto the stack. + **bpftool map pop** *MAP* + Pop and print **value** from the stack. + **bpftool map enqueue** *MAP* **value** *VALUE* Enqueue **value** into the queue. + **bpftool map dequeue** *MAP* + Dequeue and print **value** from the queue. + **bpftool map help** Print short help message. diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 6b5fcbe2d9d4..850c99ac980f 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -857,12 +857,51 @@ exit_free: return err; } +static void print_key_value(struct bpf_map_info *info, void *key, + void *value) +{ + json_writer_t *btf_wtr; + struct btf *btf = NULL; + int err; + + err = btf__get_from_id(info->btf_id, &btf); + if (err) { + p_err("failed to get btf"); + return; + } + + if (json_output) { + print_entry_json(info, key, value, btf); + } else if (btf) { + /* if here json_wtr wouldn't have been initialised, + * so let's create separate writer for btf + */ + btf_wtr = get_btf_writer(); + if (!btf_wtr) { + p_info("failed to create json writer for btf. falling back to plain output"); + btf__free(btf); + btf = NULL; + print_entry_plain(info, key, value); + } else { + struct btf_dumper d = { + .btf = btf, + .jw = btf_wtr, + .is_plain_text = true, + }; + + do_dump_btf(&d, info, key, value); + jsonw_destroy(&btf_wtr); + } + } else { + print_entry_plain(info, key, value); + } + btf__free(btf); +} + static int do_lookup(int argc, char **argv) { struct bpf_map_info info = {}; __u32 len = sizeof(info); - json_writer_t *btf_wtr; - struct btf *btf = NULL; void *key, *value; int err; int fd; @@ -900,43 +939,12 @@ static int do_lookup(int argc, char **argv) } /* here means bpf_map_lookup_elem() succeeded */ - err = btf__get_from_id(info.btf_id, &btf); - if (err) { - p_err("failed to get btf"); - goto exit_free; - } - - if (json_output) { - print_entry_json(&info, key, value, btf); - } else if (btf) { - /* if here json_wtr wouldn't have been initialised, - * so let's create separate writer for btf - */ - btf_wtr = get_btf_writer(); - if (!btf_wtr) { - p_info("failed to create json writer for btf. 
falling back to plain output"); - btf__free(btf); - btf = NULL; - print_entry_plain(&info, key, value); - } else { - struct btf_dumper d = { - .btf = btf, - .jw = btf_wtr, - .is_plain_text = true, - }; - - do_dump_btf(&d, &info, key, value); - jsonw_destroy(&btf_wtr); - } - } else { - print_entry_plain(&info, key, value); - } + print_key_value(&info, key, value); exit_free: free(key); free(value); close(fd); - btf__free(btf); return err; } @@ -1149,6 +1157,49 @@ static int do_create(int argc, char **argv) return 0; } +static int do_pop_dequeue(int argc, char **argv) +{ + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + void *key, *value; + int err; + int fd; + + if (argc < 2) + usage(); + + fd = map_parse_fd_and_info(&argc, &argv, &info, &len); + if (fd < 0) + return -1; + + err = alloc_key_value(&info, &key, &value); + if (err) + goto exit_free; + + err = bpf_map_lookup_and_delete_elem(fd, key, value); + if (err) { + if (errno == ENOENT) { + if (json_output) + jsonw_null(json_wtr); + else + printf("Error: empty map\n"); + } else { + p_err("pop failed: %s", strerror(errno)); + } + + goto exit_free; + } + + print_key_value(&info, key, value); + +exit_free: + free(key); + free(value); + close(fd); + + return err; +} + static int do_help(int argc, char **argv) { if (json_output) { @@ -1170,7 +1221,9 @@ static int do_help(int argc, char **argv) " %s %s event_pipe MAP [cpu N index M]\n" " %s %s peek MAP\n" " %s %s push MAP value VALUE\n" + " %s %s pop MAP\n" " %s %s enqueue MAP value VALUE\n" + " %s %s dequeue MAP\n" " %s %s help\n" "\n" " " HELP_SPEC_MAP "\n" @@ -1189,7 +1242,7 @@ static int do_help(int argc, char **argv) bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], - bin_name, argv[-2]); + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); return 0; } @@ -1209,6 +1262,8 @@ static const struct cmd cmds[] = { { "peek", do_lookup }, { "push", do_update }, { "enqueue", do_update }, + { "pop", do_pop_dequeue }, + { "dequeue", do_pop_dequeue }, { 0 } }; From 55c70bffc772897f00336b36ff74a4007f7a346d Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 16 Jan 2019 11:10:05 -0800 Subject: [PATCH 13/68] bpftool: add bash completion for peek/push/enqueue/pop/dequeue bpftool map peek id - suggests only queue and stack map ids bpftool map pop id - suggests only stack map ids bpftool map dequeue id - suggests only queue map ids bpftool map push id - suggests only stack map ids bpftool map enqueue id - suggests only queue map ids bpftool map push id 1 - suggests 'value', not 'key' bpftool map enqueue id 2 - suggests 'value', not 'key' bpftool map update id - suggests 'value', not 'key' bpftool map lookup id - suggests nothing Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/bash-completion/bpftool | 91 ++++++++++++++++++----- 1 file changed, 73 insertions(+), 18 deletions(-) diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index e4e4fab1b8c7..47143d793b33 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -50,14 +50,15 @@ _bpftool_get_map_ids() command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) } -_bpftool_get_perf_map_ids() +# Takes map type and adds matching map ids to the list of suggestions. 
+_bpftool_get_map_ids_for_type() { + local type="$1" COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \ - command grep -C2 perf_event_array | \ + command grep -C2 "$type" | \ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) } - _bpftool_get_prog_ids() { COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \ @@ -99,15 +100,25 @@ _sysfs_get_netdevs() "$cur" ) ) } -# For bpftool map update: retrieve type of the map to update. -_bpftool_map_update_map_type() +# Retrieve type of the map that we are operating on. +_bpftool_map_guess_map_type() { local keyword ref for (( idx=3; idx < ${#words[@]}-1; idx++ )); do - if [[ ${words[$((idx-2))]} == "update" ]]; then - keyword=${words[$((idx-1))]} - ref=${words[$((idx))]} - fi + case "${words[$((idx-2))]}" in + lookup|update) + keyword=${words[$((idx-1))]} + ref=${words[$((idx))]} + ;; + push) + printf "stack" + return 0 + ;; + enqueue) + printf "queue" + return 0 + ;; + esac done [[ -z $ref ]] && return 0 @@ -119,6 +130,8 @@ _bpftool_map_update_map_type() _bpftool_map_update_get_id() { + local command="$1" + # Is it the map to update, or a map to insert into the map to update? # Search for "value" keyword. local idx value @@ -128,11 +141,24 @@ _bpftool_map_update_get_id() break fi done - [[ $value -eq 0 ]] && _bpftool_get_map_ids && return 0 + if [[ $value -eq 0 ]]; then + case "$command" in + push) + _bpftool_get_map_ids_for_type stack + ;; + enqueue) + _bpftool_get_map_ids_for_type queue + ;; + *) + _bpftool_get_map_ids + ;; + esac + return 0 + fi # Id to complete is for a value. It can be either prog id or map id. This # depends on the type of the map to update. - local type=$(_bpftool_map_update_map_type) + local type=$(_bpftool_map_guess_map_type) case $type in array_of_maps|hash_of_maps) _bpftool_get_map_ids @@ -382,14 +408,28 @@ _bpftool() map) local MAP_TYPE='id pinned' case $command in - show|list|dump) + show|list|dump|peek|pop|dequeue) case $prev in $command) COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) return 0 ;; id) - _bpftool_get_map_ids + case "$command" in + peek) + _bpftool_get_map_ids_for_type stack + _bpftool_get_map_ids_for_type queue + ;; + pop) + _bpftool_get_map_ids_for_type stack + ;; + dequeue) + _bpftool_get_map_ids_for_type queue + ;; + *) + _bpftool_get_map_ids + ;; + esac return 0 ;; *) @@ -447,19 +487,25 @@ _bpftool() COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) ) ;; *) + case $(_bpftool_map_guess_map_type) in + queue|stack) + return 0 + ;; + esac + _bpftool_once_attr 'key' return 0 ;; esac ;; - update) + update|push|enqueue) case $prev in $command) COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) return 0 ;; id) - _bpftool_map_update_get_id + _bpftool_map_update_get_id $command return 0 ;; key) @@ -468,7 +514,7 @@ _bpftool() value) # We can have bytes, or references to a prog or a # map, depending on the type of the map to update. 
- case $(_bpftool_map_update_map_type) in + case "$(_bpftool_map_guess_map_type)" in array_of_maps|hash_of_maps) local MAP_TYPE='id pinned' COMPREPLY+=( $( compgen -W "$MAP_TYPE" \ @@ -490,6 +536,13 @@ _bpftool() return 0 ;; *) + case $(_bpftool_map_guess_map_type) in + queue|stack) + _bpftool_once_attr 'value' + return 0; + ;; + esac + _bpftool_once_attr 'key' local UPDATE_FLAGS='any exist noexist' for (( idx=3; idx < ${#words[@]}-1; idx++ )); do @@ -508,6 +561,7 @@ _bpftool() return 0 fi done + return 0 ;; esac @@ -527,7 +581,7 @@ _bpftool() return 0 ;; id) - _bpftool_get_perf_map_ids + _bpftool_get_map_ids_for_type perf_event_array return 0 ;; cpu) @@ -546,7 +600,8 @@ _bpftool() *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'delete dump getnext help \ - lookup pin event_pipe show list update create' -- \ + lookup pin event_pipe show list update create \ + peek push enqueue pop dequeue' -- \ "$cur" ) ) ;; esac From d0b2818efbe27e6c2e0c52621c8db18eb5abb5e1 Mon Sep 17 00:00:00 2001 From: Peter Oskolkov Date: Wed, 16 Jan 2019 10:43:01 -0800 Subject: [PATCH 14/68] bpf: fix a (false) compiler warning An older GCC compiler complains: kernel/bpf/verifier.c: In function 'bpf_check': kernel/bpf/verifier.c:4***:13: error: 'prev_offset' may be used uninitialized in this function [-Werror=maybe-uninitialized] } else if (krecord[i].insn_offset <= prev_offset) { ^ kernel/bpf/verifier.c:4***:38: note: 'prev_offset' was declared here u32 i, nfuncs, urec_size, min_size, prev_offset; Although the compiler is wrong here, the patch makes sure that prev_offset is always initialized, just to silence the warning. v2: fix a spelling error in the commit message. Signed-off-by: Peter Oskolkov Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- kernel/bpf/verifier.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 56674a7c3778..ce87198ecd01 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4997,13 +4997,14 @@ static int check_btf_func(struct bpf_verifier_env *env, const union bpf_attr *attr, union bpf_attr __user *uattr) { - u32 i, nfuncs, urec_size, min_size, prev_offset; + u32 i, nfuncs, urec_size, min_size; u32 krec_size = sizeof(struct bpf_func_info); struct bpf_func_info *krecord; const struct btf_type *type; struct bpf_prog *prog; const struct btf *btf; void __user *urecord; + u32 prev_offset = 0; int ret = 0; nfuncs = attr->func_info_cnt; From 49eb7ab3b2b5a106307494d2a46248ecb1530578 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:50 +0000 Subject: [PATCH 15/68] tools: bpftool: add basic probe capability, probe syscall availability Add a new component and command for bpftool, in order to probe the system to dump a set of eBPF-related parameters so that users can know what features are available on the system. Parameters are dumped in plain or JSON output (with -j/-p options). The current patch introduces probing of one simple parameter: availability of the bpf() system call. Later commits will add other probes. Sample output: # bpftool feature probe kernel Scanning system call availability... bpf() syscall is available # bpftool --json --pretty feature probe kernel { "syscall_config": { "have_bpf_syscall": true } } The optional "kernel" keyword enforces probing of the current system, which is the only possible behaviour at this stage. It can be safely omitted. The feature comes with the relevant man page, but bash completion will come in a dedicated commit. 
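Conceptually, the syscall probe boils down to one errno check: issue a bpf() call that is expected to fail and see whether the kernel answers at all. A minimal sketch of the approach taken by probe_bpf_syscall() in the diff below (standalone, assuming libbpf's bpf/bpf.h):

  #include <errno.h>
  #include <stdbool.h>
  #include <bpf/bpf.h>

  static bool have_bpf_syscall(void)
  {
          /* The load is expected to fail; only ENOSYS proves the
           * bpf() syscall itself is missing.
           */
          errno = 0;
          bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
          return errno != ENOSYS;
  }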
v3: - Do not probe kernel version. Contrary to what is written below for v2, we can have the kernel version retrieved in libbpf instead of bpftool (in the patch adding probing for program types). v2: - Remove C-style macros output from this patch. - Even though kernel version is no longer needed for testing kprobes availability, note that we still collect it in this patch so that bpftool is able to probe older kernels as well (in later patches). Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- .../bpftool/Documentation/bpftool-cgroup.rst | 1 + .../bpftool/Documentation/bpftool-feature.rst | 60 ++++++++ .../bpf/bpftool/Documentation/bpftool-map.rst | 1 + .../bpf/bpftool/Documentation/bpftool-net.rst | 1 + .../bpftool/Documentation/bpftool-perf.rst | 1 + .../bpftool/Documentation/bpftool-prog.rst | 1 + tools/bpf/bpftool/Documentation/bpftool.rst | 1 + tools/bpf/bpftool/feature.c | 131 ++++++++++++++++++ tools/bpf/bpftool/main.c | 3 +- tools/bpf/bpftool/main.h | 1 + 10 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 tools/bpf/bpftool/Documentation/bpftool-feature.rst create mode 100644 tools/bpf/bpftool/feature.c diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index d07ccf8a23f7..d43fce568ef7 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -142,5 +142,6 @@ SEE ALSO **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8), + **bpftool-feature**\ (8), **bpftool-net**\ (8), **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst new file mode 100644 index 000000000000..40ac13c0b782 --- /dev/null +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -0,0 +1,60 @@ +=============== +bpftool-feature +=============== +------------------------------------------------------------------------------- +tool for inspection of eBPF-related parameters for Linux kernel or net device +------------------------------------------------------------------------------- + +:Manual section: 8 + +SYNOPSIS +======== + + **bpftool** [*OPTIONS*] **feature** *COMMAND* + + *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] } + + *COMMANDS* := { **probe** | **help** } + +MAP COMMANDS +============= + +| **bpftool** **feature probe** [**kernel**] +| **bpftool** **feature help** + +DESCRIPTION +=========== + **bpftool feature probe** [**kernel**] + Probe the running kernel and dump a number of eBPF-related + parameters, such as availability of the **bpf()** system call. + + Keyword **kernel** can be omitted. + + **bpftool feature help** + Print short help message. + +OPTIONS +======= + -h, --help + Print short generic help message (similar to **bpftool help**). + + -v, --version + Print version number (similar to **bpftool version**). + + -j, --json + Generate JSON output. For commands that cannot produce JSON, this + option has no effect. + + -p, --pretty + Generate human-readable JSON output. Implies **-j**.
+ +SEE ALSO +======== + **bpf**\ (2), + **bpf-helpers**\ (7), + **bpftool**\ (8), + **bpftool-prog**\ (8), + **bpftool-map**\ (8), + **bpftool-cgroup**\ (8), + **bpftool-net**\ (8), + **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 0584c31d3fe2..5c984ffc9f01 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -256,5 +256,6 @@ SEE ALSO **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8), + **bpftool-feature**\ (8), **bpftool-net**\ (8), **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index ed87c9b619ad..779dab3650ee 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -142,4 +142,5 @@ SEE ALSO **bpftool-prog**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8), + **bpftool-feature**\ (8), **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst index f4c5e5538bb8..bca5590a80d0 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst @@ -84,4 +84,5 @@ SEE ALSO **bpftool-prog**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8), + **bpftool-feature**\ (8), **bpftool-net**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 58c8369b77dd..13b56102f528 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -258,5 +258,6 @@ SEE ALSO **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8), + **bpftool-feature**\ (8), **bpftool-net**\ (8), **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index e1677e81ed59..27153bb816ac 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -72,5 +72,6 @@ SEE ALSO **bpftool-prog**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8), + **bpftool-feature**\ (8), **bpftool-net**\ (8), **bpftool-perf**\ (8) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c new file mode 100644 index 000000000000..954fb12a5228 --- /dev/null +++ b/tools/bpf/bpftool/feature.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (c) 2019 Netronome Systems, Inc. */ + +#include +#include +#include +#include + +#include +#include + +#include + +#include "main.h" + +enum probe_component { + COMPONENT_UNSPEC, + COMPONENT_KERNEL, +}; + +/* Printing utility functions */ + +static void +print_bool_feature(const char *feat_name, const char *plain_name, bool res) +{ + if (json_output) + jsonw_bool_field(json_wtr, feat_name, res); + else + printf("%s is %savailable\n", plain_name, res ? 
"" : "NOT "); +} + +static void +print_start_section(const char *json_title, const char *plain_title) +{ + if (json_output) { + jsonw_name(json_wtr, json_title); + jsonw_start_object(json_wtr); + } else { + printf("%s\n", plain_title); + } +} + +/* Probing functions */ + +static bool probe_bpf_syscall(void) +{ + bool res; + + bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0); + res = (errno != ENOSYS); + + print_bool_feature("have_bpf_syscall", + "bpf() syscall", + res); + + return res; +} + +static int do_probe(int argc, char **argv) +{ + enum probe_component target = COMPONENT_UNSPEC; + + /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN). + * Let's approximate, and restrict usage to root user only. + */ + if (geteuid()) { + p_err("please run this command as root user"); + return -1; + } + + set_max_rlimit(); + + while (argc) { + if (is_prefix(*argv, "kernel")) { + if (target != COMPONENT_UNSPEC) { + p_err("component to probe already specified"); + return -1; + } + target = COMPONENT_KERNEL; + NEXT_ARG(); + } else { + p_err("expected no more arguments, 'kernel', got: '%s'?", + *argv); + return -1; + } + } + + if (json_output) + jsonw_start_object(json_wtr); + + print_start_section("syscall_config", + "Scanning system call availability..."); + + probe_bpf_syscall(); + + if (json_output) { + /* End current "section" of probes */ + jsonw_end_object(json_wtr); + /* End root object */ + jsonw_end_object(json_wtr); + } + + return 0; +} + +static int do_help(int argc, char **argv) +{ + if (json_output) { + jsonw_null(json_wtr); + return 0; + } + + fprintf(stderr, + "Usage: %s %s probe [kernel]\n" + " %s %s help\n" + "", + bin_name, argv[-2], bin_name, argv[-2]); + + return 0; +} + +static const struct cmd cmds[] = { + { "help", do_help }, + { "probe", do_probe }, + { 0 } +}; + +int do_feature(int argc, char **argv) +{ + return cmd_select(cmds, argc, argv, do_help); +} diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index f44a1c2c4ea0..a9d5e9e6a732 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -56,7 +56,7 @@ static int do_help(int argc, char **argv) " %s batch file FILE\n" " %s version\n" "\n" - " OBJECT := { prog | map | cgroup | perf | net }\n" + " OBJECT := { prog | map | cgroup | perf | net | feature }\n" " " HELP_SPEC_OPTIONS "\n" "", bin_name, bin_name, bin_name); @@ -187,6 +187,7 @@ static const struct cmd cmds[] = { { "cgroup", do_cgroup }, { "perf", do_perf }, { "net", do_net }, + { "feature", do_feature }, { "version", do_version }, { 0 } }; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 052c91d4dc55..5cfc6601de9b 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -145,6 +145,7 @@ int do_cgroup(int argc, char **arg); int do_perf(int argc, char **arg); int do_net(int argc, char **arg); int do_tracelog(int argc, char **arg); +int do_feature(int argc, char **argv); int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what); int prog_parse_fd(int *argc, char ***argv); From 7a4522bbef0c4c63a87b5363840460996aff97c3 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:51 +0000 Subject: [PATCH 16/68] tools: bpftool: add probes for /proc/ eBPF parameters Add a set of probes to dump the eBPF-related parameters available from /proc/: availability of bpf() syscall for unprivileged users, JIT compiler status and hardening status, kallsyms exports status. Sample output: # bpftool feature probe kernel Scanning system configuration... 
bpf() syscall for unprivileged users is enabled JIT compiler is disabled JIT compiler hardening is disabled JIT compiler kallsyms exports are disabled Global memory limit for JIT compiler for unprivileged users \ is 264241152 bytes ... # bpftool --json --pretty feature probe kernel { "system_config": { "unprivileged_bpf_disabled": 0, "bpf_jit_enable": 0, "bpf_jit_harden": 0, "bpf_jit_kallsyms": 0, "bpf_jit_limit": 264241152 }, ... } These probes are skipped if procfs is not mounted. v4: - Add bpf_jit_limit parameter. v2: - Remove C-style macros output from this patch. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/feature.c | 187 ++++++++++++++++++++++++++++++++++++ 1 file changed, 187 insertions(+) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 954fb12a5228..cedc44a61c6e 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -13,11 +14,29 @@ #include "main.h" +#ifndef PROC_SUPER_MAGIC +# define PROC_SUPER_MAGIC 0x9fa0 +#endif + enum probe_component { COMPONENT_UNSPEC, COMPONENT_KERNEL, }; +/* Miscellaneous utility functions */ + +static bool check_procfs(void) +{ + struct statfs st_fs; + + if (statfs("/proc", &st_fs) < 0) + return false; + if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC) + return false; + + return true; +} + /* Printing utility functions */ static void @@ -42,6 +61,153 @@ print_start_section(const char *json_title, const char *plain_title) /* Probing functions */ +static int read_procfs(const char *path) +{ + char *endptr, *line = NULL; + size_t len = 0; + FILE *fd; + int res; + + fd = fopen(path, "r"); + if (!fd) + return -1; + + res = getline(&line, &len, fd); + fclose(fd); + if (res < 0) + return -1; + + errno = 0; + res = strtol(line, &endptr, 10); + if (errno || *line == '\0' || *endptr != '\n') + res = -1; + free(line); + + return res; +} + +static void probe_unprivileged_disabled(void) +{ + int res; + + res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled"); + if (json_output) { + jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res); + } else { + switch (res) { + case 0: + printf("bpf() syscall for unprivileged users is enabled\n"); + break; + case 1: + printf("bpf() syscall restricted to privileged users\n"); + break; + case -1: + printf("Unable to retrieve required privileges for bpf() syscall\n"); + break; + default: + printf("bpf() syscall restriction has unknown value %d\n", res); + } + } +} + +static void probe_jit_enable(void) +{ + int res; + + res = read_procfs("/proc/sys/net/core/bpf_jit_enable"); + if (json_output) { + jsonw_int_field(json_wtr, "bpf_jit_enable", res); + } else { + switch (res) { + case 0: + printf("JIT compiler is disabled\n"); + break; + case 1: + printf("JIT compiler is enabled\n"); + break; + case 2: + printf("JIT compiler is enabled with debugging traces in kernel logs\n"); + break; + case -1: + printf("Unable to retrieve JIT-compiler status\n"); + break; + default: + printf("JIT-compiler status has unknown value %d\n", + res); + } + } +} + +static void probe_jit_harden(void) +{ + int res; + + res = read_procfs("/proc/sys/net/core/bpf_jit_harden"); + if (json_output) { + jsonw_int_field(json_wtr, "bpf_jit_harden", res); + } else { + switch (res) { + case 0: + printf("JIT compiler hardening is disabled\n"); + break; + case 1: + printf("JIT compiler hardening is enabled for unprivileged users\n"); 
+ break; + case 2: + printf("JIT compiler hardening is enabled for all users\n"); + break; + case -1: + printf("Unable to retrieve JIT hardening status\n"); + break; + default: + printf("JIT hardening status has unknown value %d\n", + res); + } + } +} + +static void probe_jit_kallsyms(void) +{ + int res; + + res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms"); + if (json_output) { + jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res); + } else { + switch (res) { + case 0: + printf("JIT compiler kallsyms exports are disabled\n"); + break; + case 1: + printf("JIT compiler kallsyms exports are enabled for root\n"); + break; + case -1: + printf("Unable to retrieve JIT kallsyms export status\n"); + break; + default: + printf("JIT kallsyms exports status has unknown value %d\n", res); + } + } +} + +static void probe_jit_limit(void) +{ + int res; + + res = read_procfs("/proc/sys/net/core/bpf_jit_limit"); + if (json_output) { + jsonw_int_field(json_wtr, "bpf_jit_limit", res); + } else { + switch (res) { + case -1: + printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n"); + break; + default: + printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res); + } + } +} + static bool probe_bpf_syscall(void) { bool res; @@ -88,6 +254,27 @@ static int do_probe(int argc, char **argv) if (json_output) jsonw_start_object(json_wtr); + switch (target) { + case COMPONENT_KERNEL: + case COMPONENT_UNSPEC: + print_start_section("system_config", + "Scanning system configuration..."); + if (check_procfs()) { + probe_unprivileged_disabled(); + probe_jit_enable(); + probe_jit_harden(); + probe_jit_kallsyms(); + probe_jit_limit(); + } else { + p_info("/* procfs not mounted, skipping related probes */"); + } + if (json_output) + jsonw_end_object(json_wtr); + else + printf("\n"); + break; + } + print_start_section("syscall_config", "Scanning system call availability..."); From 4567b983f78c8c496d09868a3b108fb496c8d2c9 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:52 +0000 Subject: [PATCH 17/68] tools: bpftool: add probes for kernel configuration options Add probes to dump a number of options set (or not set) for compiling the kernel image. These parameters provide information about what BPF components should be available on the system. A number of them are not directly related to eBPF, but are in fact used in the kernel as conditions on which to compile, or not to compile, some of the eBPF helper functions. Sample output: # bpftool feature probe kernel Scanning system configuration... ... CONFIG_BPF is set to y CONFIG_BPF_SYSCALL is set to y CONFIG_HAVE_EBPF_JIT is set to y ... # bpftool --pretty --json feature probe kernel { "system_config": { ... "CONFIG_BPF": "y", "CONFIG_BPF_SYSCALL": "y", "CONFIG_HAVE_EBPF_JIT": "y", ... } } v5: - Declare options[] array in probe_kernel_image_config() as static. v4: - Add some options to the list: - CONFIG_TRACING - CONFIG_KPROBE_EVENTS - CONFIG_UPROBE_EVENTS - CONFIG_FTRACE_SYSCALLS - Add comments about those options in the source code. v3: - Add a comment about /proc/config.gz not being supported as a path for the config file at this time. - Use p_info() instead of p_err() on failure to get options from config file, as bpftool keeps probing other parameters and that would possibly create duplicate "error" entries for JSON. v2: - Remove C-style macros output from this patch. 
- NOT addressed: grouping of those config options into subsections (I don't see an easy way of grouping them at the moment, please see also the discussion on v1 thread). Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/feature.c | 182 ++++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index cedc44a61c6e..4a2867439ab7 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -48,6 +48,30 @@ print_bool_feature(const char *feat_name, const char *plain_name, bool res) printf("%s is %savailable\n", plain_name, res ? "" : "NOT "); } +static void print_kernel_option(const char *name, const char *value) +{ + char *endptr; + int res; + + if (json_output) { + if (!value) { + jsonw_null_field(json_wtr, name); + return; + } + errno = 0; + res = strtol(value, &endptr, 0); + if (!errno && *endptr == '\n') + jsonw_int_field(json_wtr, name, res); + else + jsonw_string_field(json_wtr, name, value); + } else { + if (value) + printf("%s is set to %s\n", name, value); + else + printf("%s is not set\n", name); + } +} + static void print_start_section(const char *json_title, const char *plain_title) { @@ -208,6 +232,163 @@ static void probe_jit_limit(void) } } +static char *get_kernel_config_option(FILE *fd, const char *option) +{ + size_t line_n = 0, optlen = strlen(option); + char *res, *strval, *line = NULL; + ssize_t n; + + rewind(fd); + while ((n = getline(&line, &line_n, fd)) > 0) { + if (strncmp(line, option, optlen)) + continue; + /* Check we have at least '=', value, and '\n' */ + if (strlen(line) < optlen + 3) + continue; + if (*(line + optlen) != '=') + continue; + + /* Trim ending '\n' */ + line[strlen(line) - 1] = '\0'; + + /* Copy and return config option value */ + strval = line + optlen + 1; + res = strdup(strval); + free(line); + return res; + } + free(line); + + return NULL; +} + +static void probe_kernel_image_config(void) +{ + static const char * const options[] = { + /* Enable BPF */ + "CONFIG_BPF", + /* Enable bpf() syscall */ + "CONFIG_BPF_SYSCALL", + /* Does selected architecture support eBPF JIT compiler */ + "CONFIG_HAVE_EBPF_JIT", + /* Compile eBPF JIT compiler */ + "CONFIG_BPF_JIT", + /* Avoid compiling eBPF interpreter (use JIT only) */ + "CONFIG_BPF_JIT_ALWAYS_ON", + + /* cgroups */ + "CONFIG_CGROUPS", + /* BPF programs attached to cgroups */ + "CONFIG_CGROUP_BPF", + /* bpf_get_cgroup_classid() helper */ + "CONFIG_CGROUP_NET_CLASSID", + /* bpf_skb_{,ancestor_}cgroup_id() helpers */ + "CONFIG_SOCK_CGROUP_DATA", + + /* Tracing: attach BPF to kprobes, tracepoints, etc. 
*/ + "CONFIG_BPF_EVENTS", + /* Kprobes */ + "CONFIG_KPROBE_EVENTS", + /* Uprobes */ + "CONFIG_UPROBE_EVENTS", + /* Tracepoints */ + "CONFIG_TRACING", + /* Syscall tracepoints */ + "CONFIG_FTRACE_SYSCALLS", + /* bpf_override_return() helper support for selected arch */ + "CONFIG_FUNCTION_ERROR_INJECTION", + /* bpf_override_return() helper */ + "CONFIG_BPF_KPROBE_OVERRIDE", + + /* Network */ + "CONFIG_NET", + /* AF_XDP sockets */ + "CONFIG_XDP_SOCKETS", + /* BPF_PROG_TYPE_LWT_* and related helpers */ + "CONFIG_LWTUNNEL_BPF", + /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */ + "CONFIG_NET_ACT_BPF", + /* BPF_PROG_TYPE_SCHED_CLS, TC filters */ + "CONFIG_NET_CLS_BPF", + /* TC clsact qdisc */ + "CONFIG_NET_CLS_ACT", + /* Ingress filtering with TC */ + "CONFIG_NET_SCH_INGRESS", + /* bpf_skb_get_xfrm_state() helper */ + "CONFIG_XFRM", + /* bpf_get_route_realm() helper */ + "CONFIG_IP_ROUTE_CLASSID", + /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */ + "CONFIG_IPV6_SEG6_BPF", + /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */ + "CONFIG_BPF_LIRC_MODE2", + /* BPF stream parser and BPF socket maps */ + "CONFIG_BPF_STREAM_PARSER", + /* xt_bpf module for passing BPF programs to netfilter */ + "CONFIG_NETFILTER_XT_MATCH_BPF", + /* bpfilter back-end for iptables */ + "CONFIG_BPFILTER", + /* bpfilter module with "user mode helper" */ + "CONFIG_BPFILTER_UMH", + + /* test_bpf module for BPF tests */ + "CONFIG_TEST_BPF", + }; + char *value, *buf = NULL; + struct utsname utsn; + char path[PATH_MAX]; + size_t i, n; + ssize_t ret; + FILE *fd; + + if (uname(&utsn)) + goto no_config; + + snprintf(path, sizeof(path), "/boot/config-%s", utsn.release); + + fd = fopen(path, "r"); + if (!fd && errno == ENOENT) { + /* Some distributions put the config file at /proc/config, give + * it a try. + * Sometimes it is also at /proc/config.gz but we do not try + * this one for now, it would require linking against libz. + */ + fd = fopen("/proc/config", "r"); + } + if (!fd) { + p_info("skipping kernel config, can't open file: %s", + strerror(errno)); + goto no_config; + } + /* Sanity checks */ + ret = getline(&buf, &n, fd); + ret = getline(&buf, &n, fd); + if (!buf || !ret) { + p_info("skipping kernel config, can't read from file: %s", + strerror(errno)); + free(buf); + goto no_config; + } + if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) { + p_info("skipping kernel config, can't find correct file"); + free(buf); + goto no_config; + } + free(buf); + + for (i = 0; i < ARRAY_SIZE(options); i++) { + value = get_kernel_config_option(fd, options[i]); + print_kernel_option(options[i], value); + free(value); + } + fclose(fd); + return; + +no_config: + for (i = 0; i < ARRAY_SIZE(options); i++) + print_kernel_option(options[i], NULL); +} + static bool probe_bpf_syscall(void) { bool res; @@ -268,6 +449,7 @@ static int do_probe(int argc, char **argv) } else { p_info("/* procfs not mounted, skipping related probes */"); } + probe_kernel_image_config(); if (json_output) jsonw_end_object(json_wtr); else printf("\n"); From 1bf4b05810fe38c5f09973295e8d4234a4fd5d87 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:53 +0000 Subject: [PATCH 18/68] tools: bpftool: add probes for eBPF program types Introduce probes for supported BPF program types in libbpf, and call them from bpftool to test what types are available on the system. The probe simply consists of loading a very basic program of that type and seeing whether the verifier complains or not. Sample output: # bpftool feature probe kernel ...
Scanning eBPF program types... eBPF program_type socket_filter is available eBPF program_type kprobe is available eBPF program_type sched_cls is available ... # bpftool --json --pretty feature probe kernel { ... "program_types": { "have_socket_filter_prog_type": true, "have_kprobe_prog_type": true, "have_sched_cls_prog_type": true, ... } } v5: - In libbpf.map, move global symbol to a new LIBBPF_0.0.2 section. - Rename (non-API function) prog_load() as probe_load(). v3: - Get kernel version for checking kprobes availability from libbpf instead of from bpftool. Do not pass kernel_version as an argument when calling libbpf probes. - Use a switch with all enum values for setting specific program parameters just before probing, so that gcc complains at compile time (-Wswitch-enum) if new prog types were added to the kernel but libbpf was not updated. - Add a comment in libbpf.h about setrlimit() usage to allow many consecutive probe attempts. v2: - Move probes from bpftool to libbpf. - Remove C-style macros output from this patch. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/feature.c | 48 +++++++++++++++++- tools/lib/bpf/Build | 2 +- tools/lib/bpf/libbpf.h | 11 ++++ tools/lib/bpf/libbpf.map | 5 ++ tools/lib/bpf/libbpf_probes.c | 95 +++++++++++++++++++++++++++++++++++ 5 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 tools/lib/bpf/libbpf_probes.c diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 4a2867439ab7..d6508dde4808 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (c) 2019 Netronome Systems, Inc. */ +#include #include #include #include @@ -11,6 +12,7 @@ #include #include +#include #include "main.h" @@ -83,6 +85,17 @@ print_start_section(const char *json_title, const char *plain_title) } } +static void +print_end_then_start_section(const char *json_title, const char *plain_title) +{ + if (json_output) + jsonw_end_object(json_wtr); + else + printf("\n"); + + print_start_section(json_title, plain_title); +} + /* Probing functions */ static int read_procfs(const char *path) @@ -403,9 +416,33 @@ static bool probe_bpf_syscall(void) return res; } +static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types) +{ + const char *plain_comment = "eBPF program_type "; + char feat_name[128], plain_desc[128]; + size_t maxlen; + bool res; + + res = bpf_probe_prog_type(prog_type, 0); + + supported_types[prog_type] |= res; + + maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; + if (strlen(prog_type_name[prog_type]) > maxlen) { + p_info("program type name too long"); + return; + } + + sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]); + sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]); + print_bool_feature(feat_name, plain_desc, res); +} + static int do_probe(int argc, char **argv) { enum probe_component target = COMPONENT_UNSPEC; + bool supported_types[128] = {}; + unsigned int i; /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN). * Let's approximate, and restrict usage to root user only. 
@@ -460,8 +497,17 @@ static int do_probe(int argc, char **argv) print_start_section("syscall_config", "Scanning system call availability..."); - probe_bpf_syscall(); + if (!probe_bpf_syscall()) + /* bpf() syscall unavailable, don't probe other BPF features */ + goto exit_close_json; + print_end_then_start_section("program_types", + "Scanning eBPF program types..."); + + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) + probe_prog_type(i, supported_types); + +exit_close_json: if (json_output) { /* End current "section" of probes */ jsonw_end_object(json_wtr); diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index 197b40f5b5c6..bfd9bfc82c3b 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1 +1 @@ -libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o +libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 5f68d7b75215..8e63821109ab 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -355,6 +355,17 @@ LIBBPF_API const struct bpf_line_info * bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, __u32 insn_off, __u32 nr_skip); +/* + * Probe for supported system features + * + * Note that running many of these probes in a short amount of time can cause + * the kernel to reach the maximal size of lockable memory allowed for the + * user, causing subsequent probes to fail. In this case, the caller may want + * to adjust that limit with setrlimit(). + */ +LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, + __u32 ifindex); + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index cd02cd4e2cc3..c7ec3ffa24e9 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -124,3 +124,8 @@ LIBBPF_0.0.1 { local: *; }; + +LIBBPF_0.0.2 { + global: + bpf_probe_prog_type; +} LIBBPF_0.0.1; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c new file mode 100644 index 000000000000..056c0c186f2a --- /dev/null +++ b/tools/lib/bpf/libbpf_probes.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2019 Netronome Systems, Inc. 
*/ + +#include +#include +#include + +#include +#include + +#include "bpf.h" +#include "libbpf.h" + +static int get_kernel_version(void) +{ + int version, subversion, patchlevel; + struct utsname utsn; + + /* Return 0 on failure, and attempt to probe with empty kversion */ + if (uname(&utsn)) + return 0; + + if (sscanf(utsn.release, "%d.%d.%d", + &version, &subversion, &patchlevel) != 3) + return 0; + + return (version << 16) + (subversion << 8) + patchlevel; +} + +static void +probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, + size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex) +{ + struct bpf_load_program_attr xattr = {}; + int fd; + + switch (prog_type) { + case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: + xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; + break; + case BPF_PROG_TYPE_KPROBE: + xattr.kern_version = get_kernel_version(); + break; + case BPF_PROG_TYPE_UNSPEC: + case BPF_PROG_TYPE_SOCKET_FILTER: + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_SCHED_ACT: + case BPF_PROG_TYPE_TRACEPOINT: + case BPF_PROG_TYPE_XDP: + case BPF_PROG_TYPE_PERF_EVENT: + case BPF_PROG_TYPE_CGROUP_SKB: + case BPF_PROG_TYPE_CGROUP_SOCK: + case BPF_PROG_TYPE_LWT_IN: + case BPF_PROG_TYPE_LWT_OUT: + case BPF_PROG_TYPE_LWT_XMIT: + case BPF_PROG_TYPE_SOCK_OPS: + case BPF_PROG_TYPE_SK_SKB: + case BPF_PROG_TYPE_CGROUP_DEVICE: + case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_RAW_TRACEPOINT: + case BPF_PROG_TYPE_LWT_SEG6LOCAL: + case BPF_PROG_TYPE_LIRC_MODE2: + case BPF_PROG_TYPE_SK_REUSEPORT: + case BPF_PROG_TYPE_FLOW_DISSECTOR: + default: + break; + } + + xattr.prog_type = prog_type; + xattr.insns = insns; + xattr.insns_cnt = insns_cnt; + xattr.license = "GPL"; + xattr.prog_ifindex = ifindex; + + fd = bpf_load_program_xattr(&xattr, buf, buf_len); + if (fd >= 0) + close(fd); +} + +bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) +{ + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN() + }; + + if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS) + /* nfp returns -EINVAL on exit(0) with TC offload */ + insns[0].imm = 2; + + errno = 0; + probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex); + + return errno != EINVAL && errno != EOPNOTSUPP; +} From f99e166397f0298fe78bce24c55c6d074f9bf196 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:54 +0000 Subject: [PATCH 19/68] tools: bpftool: add probes for eBPF map types Add new probes for eBPF map types, to detect which ones are available on the system. Try creating one map of each type, and see if the kernel complains. Sample output: # bpftool feature probe kernel ... Scanning eBPF map types... eBPF map_type hash is available eBPF map_type array is available eBPF map_type prog_array is available ... # bpftool --json --pretty feature probe kernel { ... "map_types": { "have_hash_map_type": true, "have_array_map_type": true, "have_prog_array_map_type": true, ... } } v5: - In libbpf.map, move global symbol to the new LIBBPF_0.0.2 section. v3: - Use a switch with all enum values for setting specific map parameters, so that gcc complains at compile time (-Wswitch-enum) if new map types were added to the kernel but libbpf was not updated. v2: - Move probes from bpftool to libbpf. - Remove C-style macros output from this patch.
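The map probes follow the same pattern as the program-type probes above; a simplified, hypothetical sketch of the core idea (the actual bpf_probe_map_type() in the diff below additionally adjusts key/value sizes and flags per map type, and handles map-in-map types):

#include <stdbool.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Simplified illustration: try to create a one-entry map of the given
 * type with __u32 key and value, and report whether the kernel
 * accepted it.
 */
static bool map_type_supported(enum bpf_map_type map_type)
{
	int fd;

	fd = bpf_create_map(map_type, sizeof(__u32), sizeof(__u32), 1, 0);
	if (fd >= 0)
		close(fd);

	return fd >= 0;
}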
Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/feature.c | 26 +++++++++++ tools/bpf/bpftool/main.h | 3 ++ tools/bpf/bpftool/map.c | 4 +- tools/lib/bpf/libbpf.h | 1 + tools/lib/bpf/libbpf.map | 1 + tools/lib/bpf/libbpf_probes.c | 84 +++++++++++++++++++++++++++++++++++ 6 files changed, 118 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index d6508dde4808..cc731475c74b 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -438,6 +438,26 @@ static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types) print_bool_feature(feat_name, plain_desc, res); } +static void probe_map_type(enum bpf_map_type map_type) +{ + const char *plain_comment = "eBPF map_type "; + char feat_name[128], plain_desc[128]; + size_t maxlen; + bool res; + + res = bpf_probe_map_type(map_type, 0); + + maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; + if (strlen(map_type_name[map_type]) > maxlen) { + p_info("map type name too long"); + return; + } + + sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]); + sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]); + print_bool_feature(feat_name, plain_desc, res); +} + static int do_probe(int argc, char **argv) { enum probe_component target = COMPONENT_UNSPEC; @@ -507,6 +527,12 @@ static int do_probe(int argc, char **argv) for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) probe_prog_type(i, supported_types); + print_end_then_start_section("map_types", + "Scanning eBPF map types..."); + + for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) + probe_map_type(i); + exit_close_json: if (json_output) { /* End current "section" of probes */ diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 5cfc6601de9b..d7dd84d3c660 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -75,6 +75,9 @@ static const char * const prog_type_name[] = { [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", }; +extern const char * const map_type_name[]; +extern const size_t map_type_name_size; + enum bpf_obj_type { BPF_OBJ_UNKNOWN, BPF_OBJ_PROG, diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 850c99ac980f..f15c520195b7 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -21,7 +21,7 @@ #include "json_writer.h" #include "main.h" -static const char * const map_type_name[] = { +const char * const map_type_name[] = { [BPF_MAP_TYPE_UNSPEC] = "unspec", [BPF_MAP_TYPE_HASH] = "hash", [BPF_MAP_TYPE_ARRAY] = "array", @@ -48,6 +48,8 @@ static const char * const map_type_name[] = { [BPF_MAP_TYPE_STACK] = "stack", }; +const size_t map_type_name_size = ARRAY_SIZE(map_type_name); + static bool map_is_per_cpu(__u32 type) { return type == BPF_MAP_TYPE_PERCPU_HASH || diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 8e63821109ab..72385f6f9415 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -365,6 +365,7 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, */ LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex); +LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex); #ifdef __cplusplus } /* extern "C" */ diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c7ec3ffa24e9..bb2dfc3b2d7b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -127,5 +127,6 @@ LIBBPF_0.0.1 { 
LIBBPF_0.0.2 { global: + bpf_probe_map_type; bpf_probe_prog_type; } LIBBPF_0.0.1; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 056c0c186f2a..f511bd317b87 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -93,3 +93,87 @@ bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex) return errno != EINVAL && errno != EOPNOTSUPP; } + +bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) +{ + int key_size, value_size, max_entries, map_flags; + struct bpf_create_map_attr attr = {}; + int fd = -1, fd_inner; + + key_size = sizeof(__u32); + value_size = sizeof(__u32); + max_entries = 1; + map_flags = 0; + + switch (map_type) { + case BPF_MAP_TYPE_STACK_TRACE: + value_size = sizeof(__u64); + break; + case BPF_MAP_TYPE_LPM_TRIE: + key_size = sizeof(__u64); + value_size = sizeof(__u64); + map_flags = BPF_F_NO_PREALLOC; + break; + case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: + key_size = sizeof(struct bpf_cgroup_storage_key); + value_size = sizeof(__u64); + max_entries = 0; + break; + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + key_size = 0; + break; + case BPF_MAP_TYPE_UNSPEC: + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_ARRAY: + case BPF_MAP_TYPE_PROG_ARRAY: + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + case BPF_MAP_TYPE_PERCPU_HASH: + case BPF_MAP_TYPE_PERCPU_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case BPF_MAP_TYPE_LRU_HASH: + case BPF_MAP_TYPE_LRU_PERCPU_HASH: + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH_OF_MAPS: + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_CPUMAP: + case BPF_MAP_TYPE_XSKMAP: + case BPF_MAP_TYPE_SOCKHASH: + case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + default: + break; + } + + if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || + map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + /* TODO: probe for device, once libbpf has a function to create + * map-in-map for offload + */ + if (ifindex) + return false; + + fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH, + sizeof(__u32), sizeof(__u32), 1, 0); + if (fd_inner < 0) + return false; + fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32), + fd_inner, 1, 0); + close(fd_inner); + } else { + /* Note: No other restriction on map type probes for offload */ + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + attr.map_flags = map_flags; + attr.map_ifindex = ifindex; + + fd = bpf_create_map_xattr(&attr); + } + if (fd >= 0) + close(fd); + + return fd >= 0; +} From 2d3ea5e85dd867712ba8747cb01c2d88376ead5c Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:55 +0000 Subject: [PATCH 20/68] tools: bpftool: add probes for eBPF helper functions Similarly to what was done for program types and map types, add a set of probes to test the availability of the different eBPF helper functions on the current system. For each known program type, all known helpers are tested, in order to establish a compatibility matrix. Output is provided as a set of lists of available helpers, one per program type. Sample output: # bpftool feature probe kernel ... Scanning eBPF helper functions... eBPF helpers supported for program type socket_filter: - bpf_map_lookup_elem - bpf_map_update_elem - bpf_map_delete_elem ... eBPF helpers supported for program type kprobe: - bpf_map_lookup_elem - bpf_map_update_elem - bpf_map_delete_elem ... # bpftool --json --pretty feature probe kernel { ... 
"helpers": { "socket_filter_available_helpers": ["bpf_map_lookup_elem", \ "bpf_map_update_elem","bpf_map_delete_elem", ... ], "kprobe_available_helpers": ["bpf_map_lookup_elem", \ "bpf_map_update_elem","bpf_map_delete_elem", ... ], ... } } v5: - In libbpf.map, move global symbol to the new LIBBPF_0.0.2 section. v4: - Use "enum bpf_func_id" instead of "__u32" in bpf_probe_helper() declaration for the type of the argument used to pass the id of the helper to probe. - Undef BPF_HELPER_MAKE_ENTRY after using it. v3: - Do not pass kernel version from bpftool to libbpf probes (kernel version for testing program with kprobes is retrieved directly from libbpf). - Dump one list of available helpers per program type (instead of one list of compatible program types per helper). v2: - Move probes from bpftool to libbpf. - Test all program types for each helper, print a list of working prog types for each helper. - Fall back on include/uapi/linux/bpf.h for names and ids of helpers. - Remove C-style macros output from this patch. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- .../bpftool/Documentation/bpftool-feature.rst | 4 ++ tools/bpf/bpftool/feature.c | 51 +++++++++++++++ tools/lib/bpf/libbpf.h | 2 + tools/lib/bpf/libbpf.map | 1 + tools/lib/bpf/libbpf_probes.c | 63 +++++++++++++++++++ 5 files changed, 121 insertions(+) diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 40ac13c0b782..255e3b3629a0 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -30,6 +30,10 @@ DESCRIPTION Keyword **kernel** can be omitted. + Note that when probed, some eBPF helpers (e.g. + **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may + print warnings to kernel logs. + **bpftool feature help** Print short help message. 
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index cc731475c74b..55c8d215ca44 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -25,6 +25,13 @@ enum probe_component { COMPONENT_KERNEL, }; +#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name +static const char * const helper_name[] = { + __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY) +}; + +#undef BPF_HELPER_MAKE_ENTRY + /* Miscellaneous utility functions */ static bool check_procfs(void) @@ -458,6 +465,44 @@ static void probe_map_type(enum bpf_map_type map_type) print_bool_feature(feat_name, plain_desc, res); } +static void +probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type) +{ + const char *ptype_name = prog_type_name[prog_type]; + char feat_name[128]; + unsigned int id; + bool res; + + if (json_output) { + sprintf(feat_name, "%s_available_helpers", ptype_name); + jsonw_name(json_wtr, feat_name); + jsonw_start_array(json_wtr); + } else { + printf("eBPF helpers supported for program type %s:", + ptype_name); + } + + for (id = 1; id < ARRAY_SIZE(helper_name); id++) { + if (!supported_type) + res = false; + else + res = bpf_probe_helper(id, prog_type, 0); + + if (json_output) { + if (res) + jsonw_string(json_wtr, helper_name[id]); + } else { + if (res) + printf("\n\t- %s", helper_name[id]); + } + } + + if (json_output) + jsonw_end_array(json_wtr); + else + printf("\n"); +} + static int do_probe(int argc, char **argv) { enum probe_component target = COMPONENT_UNSPEC; @@ -533,6 +578,12 @@ static int do_probe(int argc, char **argv) for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) probe_map_type(i); + print_end_then_start_section("helpers", + "Scanning eBPF helper functions..."); + + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) + probe_helpers_for_progtype(i, supported_types[i]); + exit_close_json: if (json_output) { /* End current "section" of probes */ diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 72385f6f9415..62ae6cb93da1 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -366,6 +366,8 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo, LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex); LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex); +LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, + enum bpf_prog_type prog_type, __u32 ifindex); #ifdef __cplusplus } /* extern "C" */ diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index bb2dfc3b2d7b..266bc95d0142 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -127,6 +127,7 @@ LIBBPF_0.0.1 { LIBBPF_0.0.2 { global: + bpf_probe_helper; bpf_probe_map_type; bpf_probe_prog_type; } LIBBPF_0.0.1; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index f511bd317b87..8c3a1c04dcb2 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -2,7 +2,11 @@ /* Copyright (c) 2019 Netronome Systems, Inc. 
*/ #include +#include +#include +#include #include +#include #include #include @@ -11,6 +15,37 @@ #include "bpf.h" #include "libbpf.h" +static bool grep(const char *buffer, const char *pattern) +{ + return !!strstr(buffer, pattern); +} + +static int get_vendor_id(int ifindex) +{ + char ifname[IF_NAMESIZE], path[64], buf[8]; + ssize_t len; + int fd; + + if (!if_indextoname(ifindex, ifname)) + return -1; + + snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname); + + fd = open(path, O_RDONLY); + if (fd < 0) + return -1; + + len = read(fd, buf, sizeof(buf)); + close(fd); + if (len < 0) + return -1; + if (len >= (ssize_t)sizeof(buf)) + return -1; + buf[len] = '\0'; + + return strtol(buf, NULL, 0); +} + static int get_kernel_version(void) { int version, subversion, patchlevel; @@ -177,3 +212,31 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex) return fd >= 0; } + +bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type, + __u32 ifindex) +{ + struct bpf_insn insns[2] = { + BPF_EMIT_CALL(id), + BPF_EXIT_INSN() + }; + char buf[4096] = {}; + bool res; + + probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf), + ifindex); + res = !grep(buf, "invalid func ") && !grep(buf, "unknown func "); + + if (ifindex) { + switch (get_vendor_id(ifindex)) { + case 0x19ee: /* Netronome specific */ + res = res && !grep(buf, "not supported by FW") && + !grep(buf, "unsupported function id"); + break; + default: + break; + } + } + + return res; +} From d267cff46753b0e8b2f169ff4a3f1bb40c2387a8 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:56 +0000 Subject: [PATCH 21/68] tools: bpftool: add C-style "#define" output for probes Make bpftool able to dump a subset of the parameters collected by probing the system as a listing of C-style #define macros, so that external projects can reuse the result of this probing and build BPF-based projects in accordance with the features available on the system. The new "macros" keyword is used to select this output. An additional "prefix" keyword is added so that users can select a custom prefix for macro names, in order to avoid any namespace conflict. Sample output: # bpftool feature probe kernel macros prefix FOO_ /*** System call availability ***/ #define FOO_HAVE_BPF_SYSCALL /*** eBPF program types ***/ #define FOO_HAVE_SOCKET_FILTER_PROG_TYPE #define FOO_HAVE_KPROBE_PROG_TYPE #define FOO_HAVE_SCHED_CLS_PROG_TYPE ... /*** eBPF map types ***/ #define FOO_HAVE_HASH_MAP_TYPE #define FOO_HAVE_ARRAY_MAP_TYPE #define FOO_HAVE_PROG_ARRAY_MAP_TYPE ... /*** eBPF helper functions ***/ /* * Use FOO_HAVE_PROG_TYPE_HELPER(prog_type_name, helper_name) * to determine if <helper_name> is available for <prog_type_name>, * e.g. * #if FOO_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect) * // do stuff with this helper * #elif * // use a workaround * #endif */ #define FOO_HAVE_PROG_TYPE_HELPER(prog_type, helper) \ FOO_BPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper ... #define FOO_BPF__PROG_TYPE_socket_filter__HELPER_bpf_probe_read 0 #define FOO_BPF__PROG_TYPE_socket_filter__HELPER_bpf_ktime_get_ns 1 #define FOO_BPF__PROG_TYPE_socket_filter__HELPER_bpf_trace_printk 1 ... v3: - Change output for helpers again: add a HAVE_PROG_TYPE_HELPER(type, helper) macro that can be used to tell if <helper> is available for program <type>. v2: - #define-based output added as a distinct patch. - "HAVE_" prefix appended to macro names. - Output limited to bpf() syscall availability, BPF prog and map types, helper functions.
In this version kernel config options, procfs parameters or kernel version are intentionally left aside. - Following the change on helper probes, the format for helper probes in this output style has changed (now a list of compatible program types). Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- .../bpftool/Documentation/bpftool-feature.rst | 13 +- tools/bpf/bpftool/feature.c | 148 ++++++++++++++---- 2 files changed, 132 insertions(+), 29 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 255e3b3629a0..53092995f46b 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -19,15 +19,24 @@ SYNOPSIS MAP COMMANDS ============= -| **bpftool** **feature probe** [**kernel**] +| **bpftool** **feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]] | **bpftool** **feature help** DESCRIPTION =========== - **bpftool feature probe** [**kernel**] + **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]] Probe the running kernel and dump a number of eBPF-related parameters, such as availability of the **bpf()** system call. + If the **macros** keyword (but not the **-j** option) is + passed, a subset of the output is dumped as a list of + **#define** macros that are ready to be included in a C + header file, for example. If, additionally, **prefix** is + used to define a *PREFIX*, the provided string will be used + as a prefix to the names of the macros: this can be used to + avoid conflicts on macro names when including the output of + this command as a header file. + Keyword **kernel** can be omitted. Note that when probed, some eBPF helpers (e.g. diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 55c8d215ca44..a62e637953b7 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -46,13 +46,25 @@ static bool check_procfs(void) return true; } +static void uppercase(char *str, size_t len) +{ + size_t i; + + for (i = 0; i < len && str[i] != '\0'; i++) + str[i] = toupper(str[i]); +} + /* Printing utility functions */ static void -print_bool_feature(const char *feat_name, const char *plain_name, bool res) +print_bool_feature(const char *feat_name, const char *plain_name, + const char *define_name, bool res, const char *define_prefix) { if (json_output) jsonw_bool_field(json_wtr, feat_name, res); + else if (define_prefix) + printf("#define %s%sHAVE_%s\n", define_prefix, + res ? "" : "NO_", define_name); else printf("%s is %savailable\n", plain_name, res ? "" : "NOT "); } @@ -62,6 +74,8 @@ static void print_kernel_option(const char *name, const char *value) char *endptr; int res; + /* No support for C-style output */ + if (json_output) { if (!value) { jsonw_null_field(json_wtr, name); @@ -82,25 +96,31 @@ static void print_kernel_option(const char *name, const char *value) } static void -print_start_section(const char *json_title, const char *plain_title) +print_start_section(const char *json_title, const char *plain_title, + const char *define_comment, const char *define_prefix) { if (json_output) { jsonw_name(json_wtr, json_title); jsonw_start_object(json_wtr); + } else if (define_prefix) { + printf("%s\n", define_comment); } else { printf("%s\n", plain_title); } } static void -print_end_then_start_section(const char *json_title, const char *plain_title) +print_end_then_start_section(const char *json_title, const char *plain_title, + const char *define_comment, + const char *define_prefix) { if (json_output) jsonw_end_object(json_wtr); else printf("\n"); - print_start_section(json_title, plain_title); + print_start_section(json_title, plain_title, define_comment, + define_prefix); } /* Probing functions */ @@ -134,6 +154,8 @@ static void probe_unprivileged_disabled(void) { int res; + /* No support for C-style output */ + res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled"); if (json_output) { jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res); @@ -158,6 +180,8 @@ static void probe_jit_enable(void) { int res; + /* No support for C-style output */ + res = read_procfs("/proc/sys/net/core/bpf_jit_enable"); if (json_output) { jsonw_int_field(json_wtr, "bpf_jit_enable", res); @@ -186,6 +210,8 @@ static void probe_jit_harden(void) { int res; + /* No support for C-style output */ + res = read_procfs("/proc/sys/net/core/bpf_jit_harden"); if (json_output) { jsonw_int_field(json_wtr, "bpf_jit_harden", res); @@ -214,6 +240,8 @@ static void probe_jit_kallsyms(void) { int res; + /* No support for C-style output */ + res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms"); if (json_output) { jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res); @@ -238,6 +266,8 @@ static void probe_jit_limit(void) { int res; + /* No support for C-style output */ + res = read_procfs("/proc/sys/net/core/bpf_jit_limit"); if (json_output) { jsonw_int_field(json_wtr, "bpf_jit_limit", res); @@ -409,7 +439,7 @@ no_config: print_kernel_option(options[i], NULL); } -static bool probe_bpf_syscall(void) +static bool probe_bpf_syscall(const char *define_prefix) { bool res; @@ -418,15 +448,18 @@ static bool probe_bpf_syscall(void) print_bool_feature("have_bpf_syscall", "bpf() syscall", - res); + "BPF_SYSCALL", + res, define_prefix); return res; } -static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types) +static void +probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, + const char *define_prefix) { + char feat_name[128], plain_desc[128], define_name[128]; const char *plain_comment = "eBPF program_type "; - char feat_name[128], plain_desc[128]; size_t maxlen; bool res; @@ -441,14 +474,18 @@ static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types) } sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]); + sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]); + uppercase(define_name, sizeof(define_name)); sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]); - print_bool_feature(feat_name, plain_desc, res); + print_bool_feature(feat_name, plain_desc, define_name, res, + define_prefix); } -static void probe_map_type(enum bpf_map_type map_type) +static void +probe_map_type(enum bpf_map_type map_type, const char *define_prefix) { + char feat_name[128], plain_desc[128], define_name[128]; const char *plain_comment = "eBPF map_type "; - char feat_name[128], plain_desc[128]; size_t maxlen; bool res; @@ -461,12 +498,16 @@ static void probe_map_type(enum bpf_map_type map_type) } sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]); + sprintf(define_name, "%s_map_type", map_type_name[map_type]); + uppercase(define_name, sizeof(define_name)); sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]); - print_bool_feature(feat_name, plain_desc, res); + print_bool_feature(feat_name, plain_desc, define_name, res, + define_prefix); } static void -probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type) +probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, + const char *define_prefix) { const char *ptype_name = prog_type_name[prog_type]; char feat_name[128]; @@ -477,7 +518,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type) sprintf(feat_name, "%s_available_helpers", ptype_name); jsonw_name(json_wtr, feat_name); jsonw_start_array(json_wtr); - } else { + } else if (!define_prefix) { printf("eBPF helpers supported for program type %s:", ptype_name); } @@ -491,6 +532,10 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type) if (json_output) { if (res) jsonw_string(json_wtr, helper_name[id]); + } else if (define_prefix) { + printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n", + define_prefix, ptype_name, helper_name[id], + res ? "1" : "0"); } else { if (res) printf("\n\t- %s", helper_name[id]); @@ -499,13 +544,14 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type) if (json_output) jsonw_end_array(json_wtr); - else + else if (!define_prefix) printf("\n"); } static int do_probe(int argc, char **argv) { enum probe_component target = COMPONENT_UNSPEC; + const char *define_prefix = NULL; bool supported_types[128] = {}; unsigned int i; @@ -527,21 +573,45 @@ static int do_probe(int argc, char **argv) } target = COMPONENT_KERNEL; NEXT_ARG(); + } else if (is_prefix(*argv, "macros") && !define_prefix) { + define_prefix = ""; + NEXT_ARG(); + } else if (is_prefix(*argv, "prefix")) { + if (!define_prefix) { + p_err("'prefix' argument can only be used after 'macros'"); + return -1; + } + if (strcmp(define_prefix, "")) { + p_err("'prefix' already defined"); + return -1; + } + NEXT_ARG(); + + if (!REQ_ARGS(1)) + return -1; + define_prefix = GET_ARG(); } else { - p_err("expected no more arguments, 'kernel', got: '%s'?", + p_err("expected no more arguments, 'kernel', 'macros' or 'prefix', got: '%s'?", *argv); return -1; } } - if (json_output) + if (json_output) { + define_prefix = NULL; jsonw_start_object(json_wtr); + } switch (target) { case COMPONENT_KERNEL: case COMPONENT_UNSPEC: + if (define_prefix) + break; + print_start_section("system_config", - "Scanning system configuration..."); + "Scanning system configuration...", + NULL, /* define_comment never used here */ + NULL); /* define_prefix always NULL here */ if (check_procfs()) { probe_unprivileged_disabled(); probe_jit_enable(); @@ -560,29 +630,53 @@ static int do_probe(int argc, char **argv) } print_start_section("syscall_config", - "Scanning system call availability..."); + "Scanning system call availability...", + "/*** System call availability ***/", + define_prefix); - if (!probe_bpf_syscall()) + if (!probe_bpf_syscall(define_prefix)) /* bpf() syscall unavailable, don't probe other BPF features */ goto exit_close_json; print_end_then_start_section("program_types", - "Scanning eBPF program types..."); + "Scanning eBPF program types...", + "/*** eBPF program types ***/", + define_prefix); for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) - probe_prog_type(i, supported_types); + probe_prog_type(i, supported_types, define_prefix); print_end_then_start_section("map_types", - "Scanning eBPF map types..."); + "Scanning eBPF map types...", + "/*** eBPF map types ***/", + define_prefix); for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) - probe_map_type(i); + probe_map_type(i, define_prefix); print_end_then_start_section("helpers", - "Scanning eBPF helper functions..."); + "Scanning eBPF helper functions...", + "/*** eBPF helper functions ***/", + define_prefix); + if (define_prefix) + printf("/*\n" + " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n" + " * to determine if <helper_name> is available for <prog_type_name>,\n" + " * e.g.\n" + " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n" + " * // do stuff with this helper\n" + " * #elif\n" + " * // use a workaround\n" + " * #endif\n" + " */\n" + "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n" + " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n", + define_prefix, define_prefix, define_prefix, + define_prefix); for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) - probe_helpers_for_progtype(i, supported_types[i]); + probe_helpers_for_progtype(i, supported_types[i], + define_prefix); exit_close_json: if (json_output) { @@ -603,7 +697,7 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s probe [kernel]\n" + "Usage: %s %s probe [kernel] [macros [prefix PREFIX]]\n" " %s %s help\n" "", bin_name, argv[-2], bin_name, argv[-2]); From f9499fedf28547b496066b65de9180eaf3a7dc50 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:57 +0000 Subject: [PATCH 22/68] tools: bpftool: add probes for a network device bpftool gained support for probing the current system in order to see what program and map types, and what helpers are available on that system. This patch adds the possibility to pass an interface index to libbpf (and hence to the kernel) when trying to load the programs or to create the maps, in order to see what items a given network device can support. A new keyword "dev <ifname>" can be used as an alternative to "kernel" to indicate that the given device should be tested. If no target ("dev" or "kernel") is specified, bpftool defaults to probing the kernel. Sample output: # bpftool -p feature probe dev lo { "syscall_config": { "have_bpf_syscall": true }, "program_types": { "have_sched_cls_prog_type": false, "have_xdp_prog_type": false }, ... } As the target is a network device, /proc/ parameters and kernel configuration are NOT dumped. Availability of the bpf() syscall is still probed, so we can return early if that syscall is not usable (since there is no point in attempting the remaining probes in this case). Among the program types, only the ones that can be offloaded are probed. All map types are probed, as there is no specific rule telling which one could or could not be supported by a device in the future. All helpers are probed (but only for offload-able program types).
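At its core, the device path described above reduces to loading the test program with a non-zero ifindex. A hedged sketch of that single step (bpf_load_program_xattr() and struct bpf_load_program_attr are the libbpf API used elsewhere in this series; the wrapper itself is hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>
#include <bpf/bpf.h>

/* Illustration: the only difference between probing the host kernel
 * and probing a device is prog_ifindex, which requests offload of the
 * test program to that netdev.
 */
static bool prog_type_supported_on(enum bpf_prog_type prog_type,
				   const struct bpf_insn *insns,
				   size_t insns_cnt, __u32 ifindex)
{
	struct bpf_load_program_attr attr = {};
	int fd;

	attr.prog_type = prog_type;
	attr.insns = insns;
	attr.insns_cnt = insns_cnt;
	attr.license = "GPL";
	attr.prog_ifindex = ifindex;	/* 0 would target the host kernel */

	errno = 0;
	fd = bpf_load_program_xattr(&attr, NULL, 0);
	if (fd >= 0)
		close(fd);

	return errno != EINVAL && errno != EOPNOTSUPP;
}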
Caveat: as bpftool does not attempt to attach programs to the device at the moment, probes do not entirely reflect what the device accepts: typically, for Netronome's nfp, results will announce that TC cls offload is available even if support has been deactivated (with e.g. ethtool -K eth1 hw-tc-offload off). v2: - All helpers are probed, whereas previous version would only probe the ones compatible with an offload-able program type. This is because we do not keep a default compatible program type for each helper anymore. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Reviewed-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- .../bpftool/Documentation/bpftool-feature.rst | 18 ++++- tools/bpf/bpftool/feature.c | 69 ++++++++++++++++--- 2 files changed, 73 insertions(+), 14 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 53092995f46b..8d489a26e3c9 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst @@ -19,14 +19,18 @@ SYNOPSIS MAP COMMANDS ============= -| **bpftool** **feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]] +| **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]] | **bpftool** **feature help** +| +| *COMPONENT* := { **kernel** | **dev** *NAME* } DESCRIPTION =========== **bpftool feature probe** [**kernel**] [**macros** [**prefix** *PREFIX*]] Probe the running kernel and dump a number of eBPF-related - parameters, such as availability of the **bpf()** system call. + parameters, such as availability of the **bpf()** system call, + JIT status, eBPF program types availability, eBPF helper + functions availability, and more. If the **macros** keyword (but not the **-j** option) is passed, a subset of the output is dumped as a list of @@ -37,12 +41,20 @@ DESCRIPTION avoid conflicts on macro names when including the output of this command as a header file. - Keyword **kernel** can be omitted. + Keyword **kernel** can be omitted. If no probe target is + specified, probing the kernel is the default behaviour. Note that when probed, some eBPF helpers (e.g. **bpf_trace_printk**\ () or **bpf_probe_write_user**\ ()) may print warnings to kernel logs. + **bpftool feature probe dev** *NAME* [**macros** [**prefix** *PREFIX*]] + Probe network device for supported eBPF features and dump + results to the console. + + The two keywords **macros** and **prefix** have the same + role as when probing the kernel. + **bpftool feature help** Print short help message. 
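
As an aside on how the generated output is meant to be consumed: the
probe results produced with the **macros** keyword are C preprocessor
directives, so a user program can gate code paths on them. A minimal
sketch (the FOO_ prefix and the generated header name are illustrative,
chosen by the user):

    /* generated with: bpftool feature probe macros prefix FOO_ > bpf_features.h */
    #include "bpf_features.h"

    static int can_use_xdp_redirect(void)
    {
    #if FOO_HAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)
    	return 1;	/* bpf_redirect() is usable from XDP here */
    #else
    	return 0;	/* fall back to a workaround */
    #endif
    }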
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index a62e637953b7..993c6f1e5473 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -23,6 +24,7 @@ enum probe_component { COMPONENT_UNSPEC, COMPONENT_KERNEL, + COMPONENT_DEVICE, }; #define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name @@ -456,14 +458,24 @@ static bool probe_bpf_syscall(const char *define_prefix) static void probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, - const char *define_prefix) + const char *define_prefix, __u32 ifindex) { char feat_name[128], plain_desc[128], define_name[128]; const char *plain_comment = "eBPF program_type "; size_t maxlen; bool res; - res = bpf_probe_prog_type(prog_type, 0); + if (ifindex) + /* Only test offload-able program types */ + switch (prog_type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_XDP: + break; + default: + return; + } + + res = bpf_probe_prog_type(prog_type, ifindex); supported_types[prog_type] |= res; @@ -482,14 +494,15 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, } static void -probe_map_type(enum bpf_map_type map_type, const char *define_prefix) +probe_map_type(enum bpf_map_type map_type, const char *define_prefix, + __u32 ifindex) { char feat_name[128], plain_desc[128], define_name[128]; const char *plain_comment = "eBPF map_type "; size_t maxlen; bool res; - res = bpf_probe_map_type(map_type, 0); + res = bpf_probe_map_type(map_type, ifindex); maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; if (strlen(map_type_name[map_type]) > maxlen) { @@ -507,13 +520,23 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix) static void probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, - const char *define_prefix) + const char *define_prefix, __u32 ifindex) { const char *ptype_name = prog_type_name[prog_type]; char feat_name[128]; unsigned int id; bool res; + if (ifindex) + /* Only test helpers for offload-able program types */ + switch (prog_type) { + case BPF_PROG_TYPE_SCHED_CLS: + case BPF_PROG_TYPE_XDP: + break; + default: + return; + } + if (json_output) { sprintf(feat_name, "%s_available_helpers", ptype_name); jsonw_name(json_wtr, feat_name); @@ -527,7 +550,7 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type, if (!supported_type) res = false; else - res = bpf_probe_helper(id, prog_type, 0); + res = bpf_probe_helper(id, prog_type, ifindex); if (json_output) { if (res) @@ -553,7 +576,9 @@ static int do_probe(int argc, char **argv) enum probe_component target = COMPONENT_UNSPEC; const char *define_prefix = NULL; bool supported_types[128] = {}; + __u32 ifindex = 0; unsigned int i; + char *ifname; /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN). * Let's approximate, and restrict usage to root user only. 
@@ -573,6 +598,24 @@ static int do_probe(int argc, char **argv) } target = COMPONENT_KERNEL; NEXT_ARG(); + } else if (is_prefix(*argv, "dev")) { + NEXT_ARG(); + + if (target != COMPONENT_UNSPEC || ifindex) { + p_err("component to probe already specified"); + return -1; + } + if (!REQ_ARGS(1)) + return -1; + + target = COMPONENT_DEVICE; + ifname = GET_ARG(); + ifindex = if_nametoindex(ifname); + if (!ifindex) { + p_err("unrecognized netdevice '%s': %s", ifname, + strerror(errno)); + return -1; + } } else if (is_prefix(*argv, "macros") && !define_prefix) { define_prefix = ""; NEXT_ARG(); @@ -591,7 +634,7 @@ static int do_probe(int argc, char **argv) return -1; define_prefix = GET_ARG(); } else { - p_err("expected no more arguments, 'kernel', 'macros' or 'prefix', got: '%s'?", + p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?", *argv); return -1; } @@ -627,6 +670,8 @@ static int do_probe(int argc, char **argv) else printf("\n"); break; + default: + break; } print_start_section("syscall_config", @@ -644,7 +689,7 @@ static int do_probe(int argc, char **argv) define_prefix); for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) - probe_prog_type(i, supported_types, define_prefix); + probe_prog_type(i, supported_types, define_prefix, ifindex); print_end_then_start_section("map_types", "Scanning eBPF map types...", @@ -652,7 +697,7 @@ static int do_probe(int argc, char **argv) define_prefix); for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++) - probe_map_type(i, define_prefix); + probe_map_type(i, define_prefix, ifindex); print_end_then_start_section("helpers", "Scanning eBPF helper functions...", @@ -676,7 +721,7 @@ static int do_probe(int argc, char **argv) define_prefix); for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) probe_helpers_for_progtype(i, supported_types[i], - define_prefix); + define_prefix, ifindex); exit_close_json: if (json_output) { @@ -697,8 +742,10 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s probe [kernel] [macros [prefix PREFIX]]\n" + "Usage: %s %s probe [COMPONENT] [macros [prefix PREFIX]]\n" " %s %s help\n" + "\n" + " COMPONENT := { kernel | dev NAME }\n" "", bin_name, argv[-2], bin_name, argv[-2]); From 948703e80868a7ee00a9ca16c0e37ffe6933f9bb Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Thu, 17 Jan 2019 15:27:58 +0000 Subject: [PATCH 23/68] tools: bpftool: add bash completion for bpftool probes Add the bash completion related to the newly introduced "bpftool feature probe" command. 
Signed-off-by: Quentin Monnet
Reviewed-by: Jakub Kicinski
Reviewed-by: Stanislav Fomichev
Signed-off-by: Alexei Starovoitov
---
 tools/bpf/bpftool/bash-completion/bpftool | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 47143d793b33..763dd12482aa 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -679,6 +679,25 @@ _bpftool()
                     ;;
             esac
             ;;
+        feature)
+            case $command in
+                probe)
+                    [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0
+                    [[ $prev == "prefix" ]] && return 0
+                    if _bpftool_search_list 'macros'; then
+                        COMPREPLY+=( $( compgen -W 'prefix' -- "$cur" ) )
+                    else
+                        COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+                    fi
+                    _bpftool_one_of_list 'kernel dev'
+                    return 0
+                    ;;
+                *)
+                    [[ $prev == $object ]] && \
+                        COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+                    ;;
+            esac
+            ;;
     esac
 } &&
 complete -F _bpftool bpftool

From ffcf7ce9332723cab5ae55575f3a55d1ce559bf3 Mon Sep 17 00:00:00 2001
From: Yonghong Song
Date: Fri, 18 Jan 2019 13:56:49 -0800
Subject: [PATCH 24/68] bpf: btf: add btf documentation

This patch added documentation for BTF (BPF Type Format).
The document is placed under the linux Documentation/bpf directory.

Signed-off-by: Yonghong Song
Signed-off-by: Alexei Starovoitov
---
 Documentation/bpf/btf.rst   | 870 ++++++++++++++++++++++++++++++++++++
 Documentation/bpf/index.rst |   7 +
 2 files changed, 877 insertions(+)
 create mode 100644 Documentation/bpf/btf.rst

diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
new file mode 100644
index 000000000000..1d434c3a268d
--- /dev/null
+++ b/Documentation/bpf/btf.rst
@@ -0,0 +1,870 @@
+=====================
+BPF Type Format (BTF)
+=====================
+
+1. Introduction
+***************
+
+BTF (BPF Type Format) is the metadata format which
+encodes the debug info related to BPF programs/maps.
+The name BTF was used initially to describe
+data types. BTF was later extended to include
+function info for defined subroutines, and line info
+for source/line information.
+
+The debug info is used for map pretty printing, function
+signatures, etc. The function signatures enable better
+kernel symbols for bpf programs/functions.
+The line info helps generate
+source-annotated translated byte code, jited code
+and verifier logs.
+
+The BTF specification contains two parts:
+ * BTF kernel API
+ * BTF ELF file format
+
+The kernel API is the contract between
+user space and the kernel. The kernel verifies
+the BTF info before using it.
+The ELF file format is a user space contract
+between the ELF file and the libbpf loader.
+
+The type and string sections are part of the
+BTF kernel API, describing the debug info
+(mostly types related) referenced by the bpf program.
+These two sections are discussed in
+detail in :ref:`BTF_Type_String`.
+
+.. _BTF_Type_String:
+
+2. BTF Type and String Encoding
+*******************************
+
+The file ``include/uapi/linux/btf.h`` provides a high-level
+definition of how types/strings are encoded.
+
+The beginning of the data blob must be::
+
+    struct btf_header {
+        __u16   magic;
+        __u8    version;
+        __u8    flags;
+        __u32   hdr_len;
+
+        /* All offsets are in bytes relative to the end of this header */
+        __u32   type_off;       /* offset of type section       */
+        __u32   type_len;       /* length of type section       */
+        __u32   str_off;        /* offset of string section     */
+        __u32   str_len;        /* length of string section     */
+    };
+
+The magic is ``0xeB9F``, which has different encodings on big- and
+little-endian systems, and can be used to test whether BTF is generated
+for a big- or little-endian target.
+The btf_header is designed to be extensible, with ``hdr_len`` equal to
+``sizeof(struct btf_header)`` when the data blob is generated.
+
+2.1 String Encoding
+===================
+
+The first string in the string section must be a null string.
+The rest of the string table is a concatenation of other null-terminated
+strings.
+
+2.2 Type Encoding
+=================
+
+The type id ``0`` is reserved for the ``void`` type.
+The type section is parsed sequentially and a type id is assigned to
+each recognized type starting from id ``1``.
+Currently, the following types are supported::
+
+    #define BTF_KIND_INT            1       /* Integer      */
+    #define BTF_KIND_PTR            2       /* Pointer      */
+    #define BTF_KIND_ARRAY          3       /* Array        */
+    #define BTF_KIND_STRUCT         4       /* Struct       */
+    #define BTF_KIND_UNION          5       /* Union        */
+    #define BTF_KIND_ENUM           6       /* Enumeration  */
+    #define BTF_KIND_FWD            7       /* Forward      */
+    #define BTF_KIND_TYPEDEF        8       /* Typedef      */
+    #define BTF_KIND_VOLATILE       9       /* Volatile     */
+    #define BTF_KIND_CONST          10      /* Const        */
+    #define BTF_KIND_RESTRICT       11      /* Restrict     */
+    #define BTF_KIND_FUNC           12      /* Function     */
+    #define BTF_KIND_FUNC_PROTO     13      /* Function Proto */
+
+Note that the type section encodes debug info, not just pure types.
+``BTF_KIND_FUNC`` is not a type; it represents a defined subprogram.
+
+Each type contains the following common data::
+
+    struct btf_type {
+        __u32 name_off;
+        /* "info" bits arrangement
+         * bits  0-15: vlen (e.g. # of struct's members)
+         * bits 16-23: unused
+         * bits 24-27: kind (e.g. int, ptr, array...etc)
+         * bits 28-30: unused
+         * bit     31: kind_flag, currently used by
+         *             struct, union and fwd
+         */
+        __u32 info;
+        /* "size" is used by INT, ENUM, STRUCT and UNION.
+         * "size" tells the size of the type it is describing.
+         *
+         * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+         * FUNC and FUNC_PROTO.
+         * "type" is a type_id referring to another type.
+         */
+        union {
+                __u32 size;
+                __u32 type;
+        };
+    };
+
+For certain kinds, the common data are followed by kind-specific data.
+The ``name_off`` in ``struct btf_type`` specifies the offset in the string table.
+The following sections detail the encoding of each kind.
+
+2.2.1 BTF_KIND_INT
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: any valid offset
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_INT
+ * ``info.vlen``: 0
+ * ``size``: the size of the int type in bytes.
+
+``btf_type`` is followed by a ``u32`` with the following bits arrangement::
+
+    #define BTF_INT_ENCODING(VAL)   (((VAL) & 0x0f000000) >> 24)
+    #define BTF_INT_OFFSET(VAL)     (((VAL) & 0x00ff0000) >> 16)
+    #define BTF_INT_BITS(VAL)       ((VAL)  & 0x000000ff)
+
+The ``BTF_INT_ENCODING`` has the following attributes::
+
+    #define BTF_INT_SIGNED  (1 << 0)
+    #define BTF_INT_CHAR    (1 << 1)
+    #define BTF_INT_BOOL    (1 << 2)
+
+The ``BTF_INT_ENCODING()`` provides extra information (signedness,
+char, or bool) for the int type. The char and bool encodings
+are mostly useful for pretty printing.
+At most one encoding can
+be specified for the int type.
+
+The ``BTF_INT_BITS()`` specifies the number of actual bits held by
+this int type. For example, a 4-bit bitfield has
+``BTF_INT_BITS()`` equal to 4. ``btf_type.size * 8``
+must be equal to or greater than ``BTF_INT_BITS()`` for the type.
+The maximum value of ``BTF_INT_BITS()`` is 128.
+
+The ``BTF_INT_OFFSET()`` specifies the starting bit offset to
+calculate values for this int. For example, a bitfield struct
+member has
+
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+
+Then in the struct memory layout, this member will occupy
+``4`` bits starting from bits ``100 + 2 = 102``.
+
+Alternatively, the bitfield struct member can be encoded as follows to
+access the same bits as above:
+
+ * btf member bit offset 102,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
+
+The original intention of ``BTF_INT_OFFSET()`` is to provide
+flexibility of bitfield encoding.
+Currently, both llvm and pahole generate ``BTF_INT_OFFSET() = 0``
+for all int types.
+
+2.2.2 BTF_KIND_PTR
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_PTR
+ * ``info.vlen``: 0
+ * ``type``: the pointee type of the pointer
+
+No additional type data follow ``btf_type``.
+
+2.2.3 BTF_KIND_ARRAY
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ARRAY
+ * ``info.vlen``: 0
+ * ``size/type``: 0, not used
+
+``btf_type`` is followed by one ``struct btf_array``::
+
+    struct btf_array {
+        __u32   type;
+        __u32   index_type;
+        __u32   nelems;
+    };
+
+The ``struct btf_array`` encoding:
+ * ``type``: the element type
+ * ``index_type``: the index type
+ * ``nelems``: the number of elements for this array (``0`` is also allowed).
+
+The ``index_type`` can be any regular int type
+(u8, u16, u32, u64, unsigned __int128).
+The original design of including ``index_type`` follows dwarf,
+which has an ``index_type`` for its array type.
+Currently in BTF, beyond type verification, the ``index_type`` is not used.
+
+The ``struct btf_array`` allows chaining through the element type to
+represent multi-dimensional arrays. For example, for ``int a[5][6]``, the
+following type system illustrates the chaining:
+
+  * [1]: int
+  * [2]: array, ``btf_array.type = [1]``, ``btf_array.nelems = 6``
+  * [3]: array, ``btf_array.type = [2]``, ``btf_array.nelems = 5``
+
+Currently, both pahole and llvm collapse multi-dimensional arrays
+into a one-dimensional array; e.g., for ``a[5][6]``, ``btf_array.nelems``
+equals ``30``. This is because the original use case is map pretty
+printing where the whole array is dumped out, so a one-dimensional array
+is enough. As more BTF usage is explored, pahole and llvm can be
+changed to generate a proper chained representation for
+multi-dimensional arrays.
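+
+As an illustration (an editor's sketch, not part of the UAPI header), the
+chained encoding above can be laid out in C roughly as follows, where
+``BTF_INFO_ENC()`` is the info-word encoder used by the kernel BTF
+selftests::
+
+    #include <linux/btf.h>
+
+    #define BTF_INFO_ENC(kind, kind_flag, vlen) \
+        ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & 0xffff))
+
+    /* [1] int; "int" at string offset 1. In the blob, this btf_type is
+     * followed by the u32 int-encoding word (BTF_INT_BITS() = 32).
+     */
+    const struct btf_type t_int = {
+        .name_off = 1,
+        .info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0),
+        .size = 4,
+    };
+
+    /* [2] int[6], then [3] int[5][6] chaining through [2]; each btf_type
+     * is immediately followed by its struct btf_array in the blob.
+     */
+    const struct btf_type t_inner = {
+        .info = BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0),
+    };
+    const struct btf_array a_inner = { .type = 1, .index_type = 1, .nelems = 6 };
+
+    const struct btf_type t_outer = {
+        .info = BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0),
+    };
+    const struct btf_array a_outer = { .type = 2, .index_type = 1, .nelems = 5 };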
+
+2.2.4 BTF_KIND_STRUCT
+~~~~~~~~~~~~~~~~~~~~~
+2.2.5 BTF_KIND_UNION
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0 or 1
+ * ``info.kind``: BTF_KIND_STRUCT or BTF_KIND_UNION
+ * ``info.vlen``: the number of struct/union members
+ * ``size``: the size of the struct/union in bytes
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_member``.::
+
+    struct btf_member {
+        __u32   name_off;
+        __u32   type;
+        __u32   offset;
+    };
+
+``struct btf_member`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``type``: the member type
+ * ``offset``: see below
+
+If the type info ``kind_flag`` is not set, the offset contains
+only the bit offset of the member. Note that the base type of a
+bitfield can only be an int or enum type. If the bitfield size
+is 32, the base type can be either an int or enum type.
+If the bitfield size is not 32, the base type must be int,
+and the int type's ``BTF_INT_BITS()`` encodes the bitfield size.
+
+If the ``kind_flag`` is set, the ``btf_member.offset``
+contains both the member bitfield size and bit offset. The
+bitfield size and bit offset are calculated as below.::
+
+    #define BTF_MEMBER_BITFIELD_SIZE(val)   ((val) >> 24)
+    #define BTF_MEMBER_BIT_OFFSET(val)      ((val) & 0xffffff)
+
+In this case, if the base type is an int type, it must
+be a regular int type:
+
+  * ``BTF_INT_OFFSET()`` must be 0.
+  * ``BTF_INT_BITS()`` must be equal to ``{1,2,4,8,16} * 8``.
+
+The following kernel patch introduced ``kind_flag`` and
+explained why both modes exist:
+
+  https://github.com/torvalds/linux/commit/9d5f9f701b1891466fb3dbb1806ad97716f95cc3#diff-fa650a64fdd3968396883d2fe8215ff3
+
+2.2.6 BTF_KIND_ENUM
+~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0 or offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_ENUM
+ * ``info.vlen``: number of enum values
+ * ``size``: 4
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum``.::
+
+    struct btf_enum {
+        __u32   name_off;
+        __s32   val;
+    };
+
+The ``btf_enum`` encoding:
+ * ``name_off``: offset to a valid C identifier
+ * ``val``: any value
+
+2.2.7 BTF_KIND_FWD
+~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0 for struct, 1 for union
+ * ``info.kind``: BTF_KIND_FWD
+ * ``info.vlen``: 0
+ * ``type``: 0
+
+No additional type data follow ``btf_type``.
+
+2.2.8 BTF_KIND_TYPEDEF
+~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_TYPEDEF
+ * ``info.vlen``: 0
+ * ``type``: the type which can be referred to by the name at ``name_off``
+
+No additional type data follow ``btf_type``.
+
+2.2.9 BTF_KIND_VOLATILE
+~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_VOLATILE
+ * ``info.vlen``: 0
+ * ``type``: the type with the ``volatile`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.10 BTF_KIND_CONST
+~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_CONST
+ * ``info.vlen``: 0
+ * ``type``: the type with the ``const`` qualifier
+
+No additional type data follow ``btf_type``.
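+
+As a consumer-side illustration (an editor's sketch, not kernel code): the
+qualifier kinds above and ``BTF_KIND_TYPEDEF`` all chain through ``type``,
+so a reader typically strips them to reach the underlying type, using the
+``BTF_INFO_KIND()`` accessor from ``include/uapi/linux/btf.h``. The
+``btf_type_by_id()`` lookup below is a hypothetical helper over an
+already-validated blob::
+
+    #include <linux/btf.h>
+
+    const struct btf_type *btf_type_by_id(__u32 type_id); /* hypothetical */
+
+    static const struct btf_type *skip_mods_and_typedefs(__u32 type_id)
+    {
+        const struct btf_type *t = btf_type_by_id(type_id);
+
+        while (t) {
+            switch (BTF_INFO_KIND(t->info)) {
+            case BTF_KIND_VOLATILE:
+            case BTF_KIND_CONST:
+            case BTF_KIND_RESTRICT:   /* see 2.2.11 below */
+            case BTF_KIND_TYPEDEF:
+                t = btf_type_by_id(t->type);
+                break;
+            default:
+                return t;
+            }
+        }
+        return NULL;
+    }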
+
+2.2.11 BTF_KIND_RESTRICT
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_RESTRICT
+ * ``info.vlen``: 0
+ * ``type``: the type with the ``restrict`` qualifier
+
+No additional type data follow ``btf_type``.
+
+2.2.12 BTF_KIND_FUNC
+~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: offset to a valid C identifier
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC
+ * ``info.vlen``: 0
+ * ``type``: a BTF_KIND_FUNC_PROTO type
+
+No additional type data follow ``btf_type``.
+
+A BTF_KIND_FUNC defines, not a type, but a subprogram (function) whose
+signature is defined by ``type``. The subprogram is thus an instance of
+that type. The BTF_KIND_FUNC may in turn be referenced by a func_info in
+the :ref:`BTF_Ext_Section` (ELF) or in the arguments to
+:ref:`BPF_Prog_Load` (ABI).
+
+2.2.13 BTF_KIND_FUNC_PROTO
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``struct btf_type`` encoding requirement:
+ * ``name_off``: 0
+ * ``info.kind_flag``: 0
+ * ``info.kind``: BTF_KIND_FUNC_PROTO
+ * ``info.vlen``: # of parameters
+ * ``type``: the return type
+
+``btf_type`` is followed by ``info.vlen`` number of ``struct btf_param``.::
+
+    struct btf_param {
+        __u32   name_off;
+        __u32   type;
+    };
+
+If a BTF_KIND_FUNC_PROTO type is referred to by a BTF_KIND_FUNC type,
+then ``btf_param.name_off`` must point to a valid C identifier,
+except for the possible last argument representing the variable
+arguments. ``btf_param.type`` refers to the parameter type.
+
+If the function has variable arguments, the last parameter
+is encoded with ``name_off = 0`` and ``type = 0``.
+
+3. BTF Kernel API
+*****************
+
+The following bpf syscall commands involve BTF:
+ * BPF_BTF_LOAD: load a blob of BTF data into the kernel
+ * BPF_MAP_CREATE: map creation with btf key and value type info.
+ * BPF_PROG_LOAD: prog load with btf function and line info.
+ * BPF_BTF_GET_FD_BY_ID: get a btf fd
+ * BPF_OBJ_GET_INFO_BY_FD: btf, func_info, line_info
+   and other btf related info are returned.
+
+The workflow typically looks like:
+::
+
+  Application:
+      BPF_BTF_LOAD
+          |
+          v
+      BPF_MAP_CREATE and BPF_PROG_LOAD
+          |
+          V
+      ......
+
+  Introspection tool:
+      ......
+      BPF_{PROG,MAP}_GET_NEXT_ID  (get prog/map id's)
+          |
+          V
+      BPF_{PROG,MAP}_GET_FD_BY_ID  (get a prog/map fd)
+          |
+          V
+      BPF_OBJ_GET_INFO_BY_FD  (get bpf_prog_info/bpf_map_info with btf_id)
+          |                                     |
+          V                                     |
+      BPF_BTF_GET_FD_BY_ID  (get btf_fd)        |
+          |                                     |
+          V                                     |
+      BPF_OBJ_GET_INFO_BY_FD  (get btf)         |
+          |                                     |
+          V                                     V
+      pretty print types, dump func signatures and line info, etc.
+
+
+3.1 BPF_BTF_LOAD
+================
+
+Load a blob of BTF data into the kernel. A blob of data,
+described in :ref:`BTF_Type_String`,
+can be directly loaded into the kernel.
+A ``btf_fd`` is returned to userspace.
+
+3.2 BPF_MAP_CREATE
+==================
+
+A map can be created with ``btf_fd`` and specified key/value type ids.::
+
+    __u32   btf_fd;                 /* fd pointing to a BTF type data */
+    __u32   btf_key_type_id;        /* BTF type_id of the key */
+    __u32   btf_value_type_id;      /* BTF type_id of the value */
+
+In libbpf, the map can be defined with extra annotation like below:
+::
+
+    struct bpf_map_def SEC("maps") btf_map = {
+        .type = BPF_MAP_TYPE_ARRAY,
+        .key_size = sizeof(int),
+        .value_size = sizeof(struct ipv_counts),
+        .max_entries = 4,
+    };
+    BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts);
+
+Here, the parameters for the macro BPF_ANNOTATE_KV_PAIR are the map name,
+and the key and value types for the map.
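+
+For reference, ``BPF_ANNOTATE_KV_PAIR`` is defined in the selftests'
+``bpf_helpers.h`` along these lines (a sketch; check the header for the
+authoritative definition). It plants a dummy variable whose BTF carries
+the key/value types for the named map:
+::
+
+    #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)  \
+        struct ____btf_map_##name {                         \
+            type_key key;                                   \
+            type_val value;                                 \
+        };                                                  \
+        struct ____btf_map_##name                           \
+        __attribute__((section(".maps." #name), used))      \
+            ____btf_map_##name = { }
+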
+During ELF parsing, libbpf is able to extract the key/value type_id's
+and assign them to BPF_MAP_CREATE attributes automatically.
+
+.. _BPF_Prog_Load:
+
+3.3 BPF_PROG_LOAD
+=================
+
+During prog_load, func_info and line_info can be passed to the kernel with
+proper values for the following attributes:
+::
+
+    __u32 insn_cnt;
+    __aligned_u64 insns;
+    ......
+    __u32 prog_btf_fd;              /* fd pointing to BTF type data */
+    __u32 func_info_rec_size;       /* userspace bpf_func_info size */
+    __aligned_u64 func_info;        /* func info */
+    __u32 func_info_cnt;            /* number of bpf_func_info records */
+    __u32 line_info_rec_size;       /* userspace bpf_line_info size */
+    __aligned_u64 line_info;        /* line info */
+    __u32 line_info_cnt;            /* number of bpf_line_info records */
+
+The func_info and line_info are arrays of the records below, respectively.::
+
+    struct bpf_func_info {
+        __u32   insn_off; /* [0, insn_cnt - 1] */
+        __u32   type_id;  /* pointing to a BTF_KIND_FUNC type */
+    };
+    struct bpf_line_info {
+        __u32   insn_off;      /* [0, insn_cnt - 1] */
+        __u32   file_name_off; /* offset to string table for the filename */
+        __u32   line_off;      /* offset to string table for the source line */
+        __u32   line_col;      /* line number and column number */
+    };
+
+func_info_rec_size is the size of each func_info record, and
+line_info_rec_size is the size of each line_info record. Passing the
+record size to the kernel makes it possible to extend the record itself
+in the future.
+
+Below are the requirements for func_info:
+ * func_info[0].insn_off must be 0.
+ * the func_info insn_off is in strictly increasing order and matches
+   bpf func boundaries.
+
+Below are the requirements for line_info:
+ * the first insn in each func must point to a line_info record.
+ * the line_info insn_off is in strictly increasing order.
+
+For line_info, the line number and column number are defined as below:
+::
+
+    #define BPF_LINE_INFO_LINE_NUM(line_col)    ((line_col) >> 10)
+    #define BPF_LINE_INFO_LINE_COL(line_col)    ((line_col) & 0x3ff)
+
+3.4 BPF_{PROG,MAP}_GET_NEXT_ID
+==============================
+
+In the kernel, every loaded program, map or btf has a unique id.
+The id won't change during the lifetime of the program, map or btf.
+
+The bpf syscall commands BPF_{PROG,MAP}_GET_NEXT_ID return all ids,
+one command for bpf programs and one for maps, to user space,
+so the inspection tool can inspect all programs and maps.
+
+3.5 BPF_{PROG,MAP}_GET_FD_BY_ID
+===============================
+
+An introspection tool cannot use an id alone to get details about a
+program or map. A file descriptor needs to be obtained first for
+reference-counting purposes.
+
+3.6 BPF_OBJ_GET_INFO_BY_FD
+==========================
+
+Once a program/map fd is acquired, the introspection tool can
+get the detailed information from the kernel about this fd,
+some of which is btf related. For example,
+``bpf_map_info`` returns ``btf_id`` and key/value type ids.
+``bpf_prog_info`` returns ``btf_id``, func_info, and line info
+for translated bpf byte codes, and jited_line_info.
+
+3.7 BPF_BTF_GET_FD_BY_ID
+========================
+
+With the ``btf_id`` obtained in ``bpf_map_info`` and ``bpf_prog_info``,
+the bpf syscall command BPF_BTF_GET_FD_BY_ID can retrieve a btf fd.
+Then, with command BPF_OBJ_GET_INFO_BY_FD, the btf blob, originally
+loaded into the kernel with BPF_BTF_LOAD, can be retrieved.
+
+With the btf blob, ``bpf_map_info``, and ``bpf_prog_info``, the
+introspection tool has full btf knowledge and is able to pretty print
+map key/values, dump func signatures and line info, along with byte/jit
+codes.
+
+4. ELF File Format Interface
+****************************
+
+4.1 .BTF section
+================
+
+The .BTF section contains type and string data. The format of this section
+is the same as the one described in :ref:`BTF_Type_String`.
+
+.. _BTF_Ext_Section:
+
+4.2 .BTF.ext section
+====================
+
+The .BTF.ext section encodes func_info and line_info, which
+need loader manipulation before being loaded into the kernel.
+
+The specification for the .BTF.ext section is defined at
+``tools/lib/bpf/btf.h`` and ``tools/lib/bpf/btf.c``.
+
+The current header of the .BTF.ext section::
+
+    struct btf_ext_header {
+        __u16   magic;
+        __u8    version;
+        __u8    flags;
+        __u32   hdr_len;
+
+        /* All offsets are in bytes relative to the end of this header */
+        __u32   func_info_off;
+        __u32   func_info_len;
+        __u32   line_info_off;
+        __u32   line_info_len;
+    };
+
+It is very similar to the .BTF section. Instead of type/string sections,
+it contains func_info and line_info sections. See :ref:`BPF_Prog_Load`
+for details about the func_info and line_info record formats.
+
+The func_info is organized as below.::
+
+     func_info_rec_size
+     btf_ext_info_sec for section #1 /* func_info for section #1 */
+     btf_ext_info_sec for section #2 /* func_info for section #2 */
+     ...
+
+``func_info_rec_size`` specifies the size of the ``bpf_func_info``
+structure when .BTF.ext is generated. ``btf_ext_info_sec``, defined
+below, is the func_info for each specific ELF section.::
+
+     struct btf_ext_info_sec {
+        __u32   sec_name_off; /* offset to section name */
+        __u32   num_info;
+        /* Followed by num_info * record_size number of bytes */
+        __u8    data[0];
+     };
+
+Here, num_info must be greater than 0.
+
+The line_info is organized as below.::
+
+     line_info_rec_size
+     btf_ext_info_sec for section #1 /* line_info for section #1 */
+     btf_ext_info_sec for section #2 /* line_info for section #2 */
+     ...
+
+``line_info_rec_size`` specifies the size of the ``bpf_line_info``
+structure when .BTF.ext is generated.
+
+The interpretation of ``bpf_func_info->insn_off`` and
+``bpf_line_info->insn_off`` is different between the kernel API and the
+ELF API. For the kernel API, the ``insn_off`` is the instruction offset
+in units of ``struct bpf_insn``. For the ELF API, the ``insn_off`` is
+the byte offset from the beginning of the section
+(``btf_ext_info_sec->sec_name_off``).
+
+5. Using BTF
+************
+
+5.1 bpftool map pretty print
+============================
+
+With BTF, the map key/value can be printed based on fields rather than
+simply raw bytes. This is especially
+valuable for large structures or if your data structure
+has bitfields. For example, for the following map,::
+
+    enum A { A1, A2, A3, A4, A5 };
+    typedef enum A ___A;
+    struct tmp_t {
+        char a1:4;
+        int  a2:4;
+        int  :4;
+        __u32 a3:4;
+        int b;
+        ___A b1:4;
+        enum A b2:4;
+    };
+    struct bpf_map_def SEC("maps") tmpmap = {
+        .type = BPF_MAP_TYPE_ARRAY,
+        .key_size = sizeof(__u32),
+        .value_size = sizeof(struct tmp_t),
+        .max_entries = 1,
+    };
+    BPF_ANNOTATE_KV_PAIR(tmpmap, int, struct tmp_t);
+
+bpftool is able to pretty print it like below:
+::
+
+    [{
+            "key": 0,
+            "value": {
+                "a1": 0x2,
+                "a2": 0x4,
+                "a3": 0x6,
+                "b": 7,
+                "b1": 0x8,
+                "b2": 0xa
+            }
+        }
+    ]
+
+5.2 bpftool prog dump
+=====================
+
+The following is an example showing how func_info and line_info
+can help prog dump with better kernel symbol names, function prototypes
+and line information.::
+
+    $ bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv
+    [...]
+    int test_long_fname_2(struct dummy_tracepoint_args * arg):
+    bpf_prog_44a040bf25481309_test_long_fname_2:
+    ; static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+       0:   push   %rbp
+       1:   mov    %rsp,%rbp
+       4:   sub    $0x30,%rsp
+       b:   sub    $0x28,%rbp
+       f:   mov    %rbx,0x0(%rbp)
+      13:   mov    %r13,0x8(%rbp)
+      17:   mov    %r14,0x10(%rbp)
+      1b:   mov    %r15,0x18(%rbp)
+      1f:   xor    %eax,%eax
+      21:   mov    %rax,0x20(%rbp)
+      25:   xor    %esi,%esi
+    ; int key = 0;
+      27:   mov    %esi,-0x4(%rbp)
+    ; if (!arg->sock)
+      2a:   mov    0x8(%rdi),%rdi
+    ; if (!arg->sock)
+      2e:   cmp    $0x0,%rdi
+      32:   je     0x0000000000000070
+      34:   mov    %rbp,%rsi
+    ; counts = bpf_map_lookup_elem(&btf_map, &key);
+    [...]
+
+5.3 verifier log
+================
+
+The following is an example of how line_info can help debug verifier
+failures.::
+
+    /* The code at tools/testing/selftests/bpf/test_xdp_noinline.c
+     * is modified as below.
+     */
+    data = (void *)(long)xdp->data;
+    data_end = (void *)(long)xdp->data_end;
+    /*
+    if (data + 4 > data_end)
+        return XDP_DROP;
+    */
+    *(u32 *)data = dst->dst;
+
+    $ bpftool prog load ./test_xdp_noinline.o /sys/fs/bpf/test_xdp_noinline type xdp
+    ; data = (void *)(long)xdp->data;
+    224: (79) r2 = *(u64 *)(r10 -112)
+    225: (61) r2 = *(u32 *)(r2 +0)
+    ; *(u32 *)data = dst->dst;
+    226: (63) *(u32 *)(r2 +0) = r1
+    invalid access to packet, off=0 size=4, R2(id=0,off=0,r=0)
+    R2 offset is outside of the packet
+
+6. BTF Generation
+*****************
+
+You need the latest pahole
+
+  https://git.kernel.org/pub/scm/devel/pahole/pahole.git/
+
+or llvm (8.0 or later). pahole acts as a dwarf2btf converter; it doesn't
+support .BTF.ext and the BTF_KIND_FUNC type yet. For example,::
+
+      -bash-4.4$ cat t.c
+      struct t {
+        int a:2;
+        int b:3;
+        int c:2;
+      } g;
+      -bash-4.4$ gcc -c -O2 -g t.c
+      -bash-4.4$ pahole -JV t.o
+      File t.o:
+      [1] STRUCT t kind_flag=1 size=4 vlen=3
+              a type_id=2 bitfield_size=2 bits_offset=0
+              b type_id=2 bitfield_size=3 bits_offset=2
+              c type_id=2 bitfield_size=2 bits_offset=5
+      [2] INT int size=4 bit_offset=0 nr_bits=32 encoding=SIGNED
+
+llvm is able to generate .BTF and .BTF.ext directly with -g for the bpf
+target only. The assembly output (-S) is able to show the BTF encoding
+in assembly format.::
+
+    -bash-4.4$ cat t2.c
+    typedef int __int32;
+    struct t2 {
+      int a2;
+      int (*f2)(char q1, __int32 q2, ...);
+      int (*f3)();
+    } g2;
+    int main() { return 0; }
+    int test() { return 0; }
+    -bash-4.4$ clang -c -g -O2 -target bpf t2.c
+    -bash-4.4$ readelf -S t2.o
+      ......
+      [ 8] .BTF              PROGBITS         0000000000000000  00000247
+           000000000000016e  0000000000000000           0     0     1
+      [ 9] .BTF.ext          PROGBITS         0000000000000000  000003b5
+           0000000000000060  0000000000000000           0     0     1
+      [10] .rel.BTF.ext      REL              0000000000000000  000007e0
+           0000000000000040  0000000000000010          16     9     8
+      ......
+    -bash-4.4$ clang -S -g -O2 -target bpf t2.c
+    -bash-4.4$ cat t2.s
+      ......
+            .section        .BTF,"",@progbits
+            .short  60319                   # 0xeb9f
+            .byte   1
+            .byte   0
+            .long   24
+            .long   0
+            .long   220
+            .long   220
+            .long   122
+            .long   0                       # BTF_KIND_FUNC_PROTO(id = 1)
+            .long   218103808               # 0xd000000
+            .long   2
+            .long   83                      # BTF_KIND_INT(id = 2)
+            .long   16777216                # 0x1000000
+            .long   4
+            .long   16777248                # 0x1000020
+      ......
+            .byte   0                       # string offset=0
+            .ascii  ".text"                 # string offset=1
+            .byte   0
+            .ascii  "/home/yhs/tmp-pahole/t2.c" # string offset=7
+            .byte   0
+            .ascii  "int main() { return 0; }" # string offset=33
+            .byte   0
+            .ascii  "int test() { return 0; }" # string offset=58
+            .byte   0
+            .ascii  "int"                   # string offset=83
+      ......
+            .section        .BTF.ext,"",@progbits
+            .short  60319                   # 0xeb9f
+            .byte   1
+            .byte   0
+            .long   24
+            .long   0
+            .long   28
+            .long   28
+            .long   44
+            .long   8                       # FuncInfo
+            .long   1                       # FuncInfo section string offset=1
+            .long   2
+            .long   .Lfunc_begin0
+            .long   3
+            .long   .Lfunc_begin1
+            .long   5
+            .long   16                      # LineInfo
+            .long   1                       # LineInfo section string offset=1
+            .long   2
+            .long   .Ltmp0
+            .long   7
+            .long   33
+            .long   7182                    # Line 7 Col 14
+            .long   .Ltmp3
+            .long   7
+            .long   58
+            .long   8206                    # Line 8 Col 14
+
+7. Testing
+**********
+
+The kernel bpf selftest ``test_btf.c`` provides an extensive set of
+BTF-related tests.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 00a8450a602f..4e77932959cc 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -15,6 +15,13 @@ that goes into great technical depth about the BPF Architecture.
 The primary info for the bpf syscall is available in the `man-pages`_
 for `bpf(2)`_.
 
+BPF Type Format (BTF)
+=====================
+
+.. toctree::
+   :maxdepth: 1
+
+   btf
 
 Frequently asked questions (FAQ)

From c76e4c228b053eb108e6c3221bef338aa2c98fac Mon Sep 17 00:00:00 2001
From: Taeung Song
Date: Mon, 21 Jan 2019 22:06:38 +0900
Subject: [PATCH 25/68] libbpf: Show supported ELF section names when failing
 to guess prog/attach type

When users fail to get a prog/attach type from an ELF section name, we
need to show them the list of supported ELF section names, because they
may not realize that libbpf guesses prog/attach types from the given
ELF section names.

For example, when a wrong 'cgroup' section name of a BPF program is
used, show the available ELF section names (types).

Before:

    $ bpftool prog load bpf-prog.o /sys/fs/bpf/prog1
    Error: failed to guess program type based on ELF section name cgroup

After:

    libbpf: failed to guess program type based on ELF section name 'cgroup'
    libbpf: supported section(type) names are: socket kprobe/ kretprobe/
    classifier action tracepoint/ raw_tracepoint/ xdp perf_event lwt_in
    lwt_out lwt_xmit lwt_seg6local cgroup_skb/ingress cgroup_skb/egress
    cgroup/skb cgroup/sock cgroup/post_bind4 cgroup/post_bind6 cgroup/dev
    sockops sk_skb/stream_parser sk_skb/stream_verdict sk_skb sk_msg
    lirc_mode2 flow_dissector cgroup/bind4 cgroup/bind6 cgroup/connect4
    cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6

Signed-off-by: Taeung Song
Cc: Quentin Monnet
Cc: Jakub Kicinski
Cc: Andrey Ignatov
Reviewed-by: Quentin Monnet
Acked-by: Jakub Kicinski
Signed-off-by: Daniel Borkmann
---
 tools/bpf/bpftool/prog.c                       | 10 ++--
 tools/lib/bpf/libbpf.c                         | 46 ++++++++++++++++++-
 .../selftests/bpf/test_socket_cookie.c         |  4 +-
 3 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 2d1bb7d6ff51..0640e9bc0ada 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -930,10 +930,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
 			err = libbpf_prog_type_by_name(type, &attr.prog_type,
 						       &expected_attach_type);
 			free(type);
-			if (err < 0) {
-				p_err("unknown program type '%s'", *argv);
+			if (err < 0)
 				goto err_free_reuse_maps;
-			}
+
 			NEXT_ARG();
 		} else if (is_prefix(*argv, "map")) {
 			void *new_map_replace;
@@ -1028,11 +1027,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
 
 			err = libbpf_prog_type_by_name(sec_name, &prog_type,
 						       &expected_attach_type);
-			if (err < 0) {
-				p_err("failed to guess program type based on section name %s\n",
-				      sec_name);
+			if (err < 0)
 				goto err_close_obj;
-			}
 		}
 
 		bpf_program__set_ifindex(pos, ifindex);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 169e347c76f6..2ccde17957e6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2667,9 +2667,38 @@ static const struct {
 #undef BPF_EAPROG_SEC
 #undef BPF_APROG_COMPAT
 
+#define MAX_TYPE_NAME_SIZE 32
+
+static char *libbpf_get_type_names(bool attach_type)
+{
+	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
+	char *buf;
+
+	buf = malloc(len);
+	if (!buf)
+		return NULL;
+
+	buf[0] = '\0';
+	/* Forge string buf with all available names */
+	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+		if (attach_type && !section_names[i].is_attachable)
+			continue;
+
+		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
+			free(buf);
+			return NULL;
+		}
+		strcat(buf, " ");
+		strcat(buf, section_names[i].sec);
+	}
+
+	return buf;
+}
+
 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 			     enum bpf_attach_type *expected_attach_type)
 {
+	char *type_names;
 	int i;
 
 	if (!name)
@@ -2682,12 +2711,20 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 		*expected_attach_type = section_names[i].expected_attach_type;
 		return 0;
 	}
+	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
+	type_names = libbpf_get_type_names(false);
+	if (type_names != NULL) {
+		pr_info("supported section(type) names are:%s\n", type_names);
+		free(type_names);
+	}
+
 	return -EINVAL;
 }
 
 int libbpf_attach_type_by_name(const char *name,
 			       enum bpf_attach_type *attach_type)
 {
+	char *type_names;
 	int i;
 
 	if (!name)
@@ -2701,6 +2738,13 @@ int libbpf_attach_type_by_name(const char *name,
 		*attach_type = section_names[i].attach_type;
 		return 0;
 	}
+	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
+	type_names = libbpf_get_type_names(true);
+	if (type_names != NULL) {
+		pr_info("attachable section(type) names are:%s\n", type_names);
+		free(type_names);
+	}
+
 	return -EINVAL;
 }
 
@@ -2907,8 +2951,6 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 		err = bpf_program__identify_section(prog, &prog_type,
 						    &expected_attach_type);
 		if (err < 0) {
-			pr_warning("failed to guess program type based on section name %s\n",
-				   prog->section_name);
 			bpf_object__close(obj);
 			return -EINVAL;
 		}
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index fc7832ee566b..e51d63786ff8 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -158,10 +158,8 @@ static int run_test(int cgfd)
 	bpf_object__for_each_program(prog, pobj) {
 		prog_name = bpf_program__title(prog, /*needs_copy*/ false);
 
-		if (libbpf_attach_type_by_name(prog_name, &attach_type)) {
-			log_err("Unexpected prog: %s", prog_name);
+		if (libbpf_attach_type_by_name(prog_name, &attach_type))
 			goto err;
-		}
 
 		err = bpf_prog_attach(bpf_program__fd(prog), cgfd,
 				      attach_type, BPF_F_ALLOW_OVERRIDE);

From bbebce8eb9a54091b9a66121fd26b7caabd76895 Mon Sep 17 00:00:00 2001
From: Stanislav Fomichev
Date: Thu, 17 Jan 2019 11:56:12 -0800
Subject: [PATCH 26/68] selftests/bpf: don't hardcode iptables/nc path in
 test_tcpnotify_user

system() calls the shell, which should find the appropriate full path
via $PATH. On some systems, the full path to iptables and/or nc might
be different from the one we have hardcoded.
Signed-off-by: Stanislav Fomichev
Signed-off-by: Daniel Borkmann
---
 tools/testing/selftests/bpf/test_tcpnotify_user.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4e4353711a86..86152d9ae95b 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -148,17 +148,17 @@ int main(int argc, char **argv)
 	pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
 
 	sprintf(test_script,
-		"/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+		"iptables -A INPUT -p tcp --dport %d -j DROP",
 		TESTPORT);
 	system(test_script);
 
 	sprintf(test_script,
-		"/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+		"nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
 		TESTPORT);
 	system(test_script);
 
 	sprintf(test_script,
-		"/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+		"iptables -D INPUT -p tcp --dport %d -j DROP",
 		TESTPORT);
 	system(test_script);

From 2cbd95a5c4fb855a4177c0343a880cc2091f500d Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:18 -0800
Subject: [PATCH 27/68] bpf: change parameters of call/branch offset adjustment

In preparation for code removal, change the parameters of the branch
and call adjustment functions to be more universal. The current
parameters assume we are patching a single instruction with a longer
set.

A diagram may help reading the change; this is for the patch-single
case, patching instruction 1 with a replacement of 4:

        ____
     0 |____|
     1 |____| <-- pos             ^
     2 |    | <-- end old  ^      |
     3 |    |              | delta| len
     4 |____|              |      | (patch region)
     5 |    | <-- end new  v      v
     6 |____|

    end_old = pos + 1
    end_new = pos + delta + 1

If we are before the patch region, the curr variable and the target are
fully in old coordinates (hence comparing against end_old). If we are
after the region, curr is in new coordinates (hence the comparison to
end_new), but the target is in mixed coordinates, so we just check if
it falls before end_new, and if so it needs the adjustment.

Note that we will not fix up branches which land in the removed region
in case of removal, which should be okay, as we are only going to
remove dead code.
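
To make the arithmetic concrete, here is a small standalone check (an
editor's sketch, not kernel code) applying the "before the patch
region" branch of the new condition to the diagram's numbers:

    #include <assert.h>

    int main(void)
    {
    	int pos = 1, end_old = 2, end_new = 5;
    	int delta = end_new - end_old;	/* 3 */
    	int curr = 0, off = 1;		/* jump from insn 0 to old insn 2 */

    	/* same shape as the updated bpf_adj_delta_to_off() condition */
    	if (curr < pos && curr + off + 1 >= end_old)
    		off += delta;

    	assert(off == 4);	/* 0 + 4 + 1 == end_new: same target, new coords */
    	return 0;
    }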
Signed-off-by: Jakub Kicinski Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- kernel/bpf/core.c | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index f908b9356025..ad08ba341197 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -307,15 +307,16 @@ int bpf_prog_calc_tag(struct bpf_prog *fp) return 0; } -static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta, - u32 curr, const bool probe_pass) +static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, + s32 end_new, u32 curr, const bool probe_pass) { const s64 imm_min = S32_MIN, imm_max = S32_MAX; + s32 delta = end_new - end_old; s64 imm = insn->imm; - if (curr < pos && curr + imm + 1 > pos) + if (curr < pos && curr + imm + 1 >= end_old) imm += delta; - else if (curr > pos + delta && curr + imm + 1 <= pos + delta) + else if (curr >= end_new && curr + imm + 1 < end_new) imm -= delta; if (imm < imm_min || imm > imm_max) return -ERANGE; @@ -324,15 +325,16 @@ static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta, return 0; } -static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta, - u32 curr, const bool probe_pass) +static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, + s32 end_new, u32 curr, const bool probe_pass) { const s32 off_min = S16_MIN, off_max = S16_MAX; + s32 delta = end_new - end_old; s32 off = insn->off; - if (curr < pos && curr + off + 1 > pos) + if (curr < pos && curr + off + 1 >= end_old) off += delta; - else if (curr > pos + delta && curr + off + 1 <= pos + delta) + else if (curr >= end_new && curr + off + 1 < end_new) off -= delta; if (off < off_min || off > off_max) return -ERANGE; @@ -341,10 +343,10 @@ static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta, return 0; } -static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta, - const bool probe_pass) +static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, + s32 end_new, const bool probe_pass) { - u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0); + u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); struct bpf_insn *insn = prog->insnsi; int ret = 0; @@ -356,8 +358,8 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta, * do any other adjustments. Therefore skip the patchlet. */ if (probe_pass && i == pos) { - i += delta + 1; - insn++; + i = end_new; + insn = prog->insnsi + end_old; } code = insn->code; if (BPF_CLASS(code) != BPF_JMP || @@ -367,11 +369,11 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta, if (BPF_OP(code) == BPF_CALL) { if (insn->src_reg != BPF_PSEUDO_CALL) continue; - ret = bpf_adj_delta_to_imm(insn, pos, delta, i, - probe_pass); + ret = bpf_adj_delta_to_imm(insn, pos, end_old, + end_new, i, probe_pass); } else { - ret = bpf_adj_delta_to_off(insn, pos, delta, i, - probe_pass); + ret = bpf_adj_delta_to_off(insn, pos, end_old, + end_new, i, probe_pass); } if (ret) break; @@ -421,7 +423,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, * we afterwards may not fail anymore. */ if (insn_adj_cnt > cnt_max && - bpf_adj_branches(prog, off, insn_delta, true)) + bpf_adj_branches(prog, off, off + 1, off + len, true)) return NULL; /* Several new instructions need to be inserted. 
Make room @@ -453,7 +455,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, * the ship has sailed to reverse to the original state. An * overflow cannot happen at this point. */ - BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false)); + BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); bpf_adj_linfo(prog_adj, off, insn_delta); From e2ae4ca266a1c9a0163738129506dbc63d5cca80 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:19 -0800 Subject: [PATCH 28/68] bpf: verifier: hard wire branches to dead code Loading programs with dead code becomes more and more common, as people begin to patch constants at load time. Turn conditional jumps to unconditional ones, to avoid potential branch misprediction penalty. This optimization is enabled for privileged users only. For branches which just fall through we could just mark them as not seen and have dead code removal take care of them, but that seems less clean. v0.2: - don't call capable(CAP_SYS_ADMIN) twice (Jiong). v3: - fix GCC warning; Signed-off-by: Jakub Kicinski Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 45 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ce87198ecd01..bf1f98e8beb6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6458,6 +6458,40 @@ static void sanitize_dead_code(struct bpf_verifier_env *env) } } +static bool insn_is_cond_jump(u8 code) +{ + u8 op; + + if (BPF_CLASS(code) != BPF_JMP) + return false; + + op = BPF_OP(code); + return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; +} + +static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) +{ + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); + struct bpf_insn *insn = env->prog->insnsi; + const int insn_cnt = env->prog->len; + int i; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (!insn_is_cond_jump(insn->code)) + continue; + + if (!aux_data[i + 1].seen) + ja.off = insn->off; + else if (!aux_data[i + 1 + insn->off].seen) + ja.off = 0; + else + continue; + + memcpy(insn, &ja, sizeof(ja)); + } +} + /* convert load instructions that access fields of a context type into a * sequence of instructions that access fields of the underlying structure: * struct __sk_buff -> struct sk_buff @@ -7149,6 +7183,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, struct bpf_verifier_env *env; struct bpf_verifier_log *log; int ret = -EINVAL; + bool is_priv; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) @@ -7195,6 +7230,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) env->strict_alignment = false; + is_priv = capable(CAP_SYS_ADMIN); + env->allow_ptr_leaks = is_priv; + ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; @@ -7212,8 +7250,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, if (!env->explored_states) goto skip_full_check; - env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); - ret = check_subprogs(env); if (ret < 0) goto skip_full_check; @@ -7243,6 +7279,11 @@ skip_full_check: ret = check_max_stack_depth(env); /* instruction rewrites happen after this point */ + if (is_priv) { + if (ret == 0) + opt_hard_wire_dead_code_branches(env); + } + if (ret == 0) sanitize_dead_code(env); From 52875a04f4b26e7ef30a288ea096f7cfec0e93cd Mon Sep 17 00:00:00 
2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:20 -0800 Subject: [PATCH 29/68] bpf: verifier: remove dead code Instead of overwriting dead code with jmp -1 instructions remove it completely for root. Adjust verifier state and line info appropriately. v2: - adjust func_info (Alexei); - make sure first instruction retains line info (Alexei). v4: (Yonghong) - remove unnecessary if (!insn to remove) checks; - always keep last line info if first live instruction lacks one. v5: (Martin Lau) - improve and clarify comments. Signed-off-by: Jakub Kicinski Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 1 + kernel/bpf/core.c | 12 +++ kernel/bpf/verifier.c | 176 ++++++++++++++++++++++++++++++++++++++++- 3 files changed, 186 insertions(+), 3 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..be9af6b4a9e4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -778,6 +778,7 @@ static inline bool bpf_dump_raw_ok(void) struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); void bpf_clear_redirect_map(struct bpf_map *map); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ad08ba341197..2a81b8af3748 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -462,6 +462,18 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, return prog_adj; } +int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) +{ + /* Branch offsets can't overflow when program is shrinking, no need + * to call bpf_adj_branches(..., true) here + */ + memmove(prog->insnsi + off, prog->insnsi + off + cnt, + sizeof(struct bpf_insn) * (prog->len - off - cnt)); + prog->len -= cnt; + + return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); +} + void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) { int i; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bf1f98e8beb6..099b2541f87f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6432,6 +6432,150 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of return new_prog; } +static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, + u32 off, u32 cnt) +{ + int i, j; + + /* find first prog starting at or after off (first to remove) */ + for (i = 0; i < env->subprog_cnt; i++) + if (env->subprog_info[i].start >= off) + break; + /* find first prog starting at or after off + cnt (first to stay) */ + for (j = i; j < env->subprog_cnt; j++) + if (env->subprog_info[j].start >= off + cnt) + break; + /* if j doesn't start exactly at off + cnt, we are just removing + * the front of previous prog + */ + if (env->subprog_info[j].start != off + cnt) + j--; + + if (j > i) { + struct bpf_prog_aux *aux = env->prog->aux; + int move; + + /* move fake 'exit' subprog as well */ + move = env->subprog_cnt + 1 - j; + + memmove(env->subprog_info + i, + env->subprog_info + j, + sizeof(*env->subprog_info) * move); + env->subprog_cnt -= j - i; + + /* remove func_info */ + if (aux->func_info) { + move = aux->func_info_cnt - j; + + memmove(aux->func_info + i, + aux->func_info + j, + sizeof(*aux->func_info) * move); + aux->func_info_cnt -= j - i; + /* func_info->insn_off is set after all code rewrites, + * in adjust_btf_func() - no need to adjust + */ + } + } else { + /* convert i from "first prog to remove" to "first to adjust" */ + if (env->subprog_info[i].start == off) + i++; 
+ } + + /* update fake 'exit' subprog as well */ + for (; i <= env->subprog_cnt; i++) + env->subprog_info[i].start -= cnt; + + return 0; +} + +static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, + u32 cnt) +{ + struct bpf_prog *prog = env->prog; + u32 i, l_off, l_cnt, nr_linfo; + struct bpf_line_info *linfo; + + nr_linfo = prog->aux->nr_linfo; + if (!nr_linfo) + return 0; + + linfo = prog->aux->linfo; + + /* find first line info to remove, count lines to be removed */ + for (i = 0; i < nr_linfo; i++) + if (linfo[i].insn_off >= off) + break; + + l_off = i; + l_cnt = 0; + for (; i < nr_linfo; i++) + if (linfo[i].insn_off < off + cnt) + l_cnt++; + else + break; + + /* First live insn doesn't match first live linfo, it needs to "inherit" + * last removed linfo. prog is already modified, so prog->len == off + * means no live instructions after (tail of the program was removed). + */ + if (prog->len != off && l_cnt && + (i == nr_linfo || linfo[i].insn_off != off + cnt)) { + l_cnt--; + linfo[--i].insn_off = off + cnt; + } + + /* remove the line info which refer to the removed instructions */ + if (l_cnt) { + memmove(linfo + l_off, linfo + i, + sizeof(*linfo) * (nr_linfo - i)); + + prog->aux->nr_linfo -= l_cnt; + nr_linfo = prog->aux->nr_linfo; + } + + /* pull all linfo[i].insn_off >= off + cnt in by cnt */ + for (i = l_off; i < nr_linfo; i++) + linfo[i].insn_off -= cnt; + + /* fix up all subprogs (incl. 'exit') which start >= off */ + for (i = 0; i <= env->subprog_cnt; i++) + if (env->subprog_info[i].linfo_idx > l_off) { + /* program may have started in the removed region but + * may not be fully removed + */ + if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) + env->subprog_info[i].linfo_idx -= l_cnt; + else + env->subprog_info[i].linfo_idx = l_off; + } + + return 0; +} + +static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) +{ + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + unsigned int orig_prog_len = env->prog->len; + int err; + + err = bpf_remove_insns(env->prog, off, cnt); + if (err) + return err; + + err = adjust_subprog_starts_after_remove(env, off, cnt); + if (err) + return err; + + err = bpf_adj_linfo_after_remove(env, off, cnt); + if (err) + return err; + + memmove(aux_data + off, aux_data + off + cnt, + sizeof(*aux_data) * (orig_prog_len - off - cnt)); + + return 0; +} + /* The verifier does more data flow analysis than llvm and will not * explore branches that are dead at run time. Malicious programs can * have dead code too. 
Therefore replace all dead at-run-time code
@@ -6492,6 +6636,30 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
 	}
 }
 
+static int opt_remove_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	int insn_cnt = env->prog->len;
+	int i, err;
+
+	for (i = 0; i < insn_cnt; i++) {
+		int j;
+
+		j = 0;
+		while (i + j < insn_cnt && !aux_data[i + j].seen)
+			j++;
+		if (!j)
+			continue;
+
+		err = verifier_remove_insns(env, i, j);
+		if (err)
+			return err;
+		insn_cnt = env->prog->len;
+	}
+
+	return 0;
+}
+
 /* convert load instructions that access fields of a context type into a
  * sequence of instructions that access fields of the underlying structure:
  *     struct __sk_buff -> struct sk_buff
@@ -7282,11 +7450,13 @@ skip_full_check:
 	if (is_priv) {
 		if (ret == 0)
 			opt_hard_wire_dead_code_branches(env);
+		if (ret == 0)
+			ret = opt_remove_dead_code(env);
+	} else {
+		if (ret == 0)
+			sanitize_dead_code(env);
 	}
 
-	if (ret == 0)
-		sanitize_dead_code(env);
-
 	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);

From a1b14abc009d9c13be355dbd4a4c4d47816ad3db Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:21 -0800
Subject: [PATCH 30/68] bpf: verifier: remove unconditional branches by 0

Unconditional branches by 0 instructions are basically noops but they
can result from earlier optimizations, e.g. a conditional jump which
would never be taken or a conditional jump around dead code.
Remove those branches.

v2:
 - s/opt_remove_dead_branches/opt_remove_nops/ (Jiong).

Signed-off-by: Jakub Kicinski
Reviewed-by: Jiong Wang
Acked-by: Yonghong Song
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 099b2541f87f..f39bca188a5c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6660,6 +6660,27 @@ static int opt_remove_dead_code(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static int opt_remove_nops(struct bpf_verifier_env *env)
+{
+	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int i, err;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (memcmp(&insn[i], &ja, sizeof(ja)))
+			continue;
+
+		err = verifier_remove_insns(env, i, 1);
+		if (err)
+			return err;
+		insn_cnt--;
+		i--;
+	}
+
+	return 0;
+}
+
 /* convert load instructions that access fields of a context type into a
  * sequence of instructions that access fields of the underlying structure:
  *     struct __sk_buff -> struct sk_buff
@@ -7452,6 +7473,8 @@ skip_full_check:
 			opt_hard_wire_dead_code_branches(env);
 		if (ret == 0)
 			ret = opt_remove_dead_code(env);
+		if (ret == 0)
+			ret = opt_remove_nops(env);
 	} else {
 		if (ret == 0)
 			sanitize_dead_code(env);

From cb05077508bf3635fcf1637fdb30136559555c65 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:22 -0800
Subject: [PATCH 31/68] selftests: bpf: add tests for dead code removal

Add tests for newly added dead code elimination. Both verifier and BTF
tests are added. BTF test infrastructure has to be extended to be able
to account for line info which is eliminated during dead code removal.
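(Illustrative aside, not part of the patch: the heart of opt_remove_dead_code() is a compacting sweep over runs of instructions the verifier never marked as seen. The stand-alone user-space sketch below reproduces only that sweep; struct toy_insn, its seen flag, and remove_dead() are made-up stand-ins for bpf_insn, bpf_insn_aux_data.seen, and the in-kernel helpers.)

#include <stdio.h>
#include <string.h>

/* Toy model: each "instruction" is an opcode plus a seen flag,
 * mirroring bpf_insn_aux_data.seen. */
struct toy_insn {
	int op;
	int seen;
};

/* Remove every run of instructions the walker never marked as seen,
 * using the same loop shape as opt_remove_dead_code(). Returns the
 * new program length. */
static int remove_dead(struct toy_insn *insns, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		int j = 0;

		while (i + j < len && !insns[i + j].seen)
			j++;
		if (!j)
			continue;

		/* compact the array, like bpf_remove_insns() does */
		memmove(&insns[i], &insns[i + j],
			sizeof(*insns) * (len - i - j));
		len -= j;
	}
	return len;
}

int main(void)
{
	struct toy_insn prog[] = {
		{ 1, 1 }, { 2, 0 }, { 3, 0 }, { 4, 1 }, { 5, 1 },
	};
	int i, len = remove_dead(prog, 5);

	for (i = 0; i < len; i++)
		printf("%d ", prog[i].op);	/* prints: 1 4 5 */
	printf("\n");
	return 0;
}

Running it prints "1 4 5": the two never-seen instructions are compacted away, which is exactly the memmove() performed by bpf_remove_insns() in the patch.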
Signed-off-by: Jakub Kicinski Acked-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_btf.c | 396 +++++++++++++++++++- tools/testing/selftests/bpf/test_verifier.c | 160 ++++++++ 2 files changed, 549 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index bfd92182533b..179f1d8ec5bf 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -4293,6 +4293,10 @@ static struct prog_info_raw_test { __u32 line_info_rec_size; __u32 nr_jited_ksyms; bool expected_prog_load_failure; + __u32 dead_code_cnt; + __u32 dead_code_mask; + __u32 dead_func_cnt; + __u32 dead_func_mask; } info_raw_tests[] = { { .descr = "func_type (main func + one sub)", @@ -4719,6 +4723,369 @@ static struct prog_info_raw_test { .expected_prog_load_failure = true, }, +{ + .descr = "line_info (dead start)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0/* dead jmp */\0int a=1;\0int b=2;\0return a + b;\0return a + b;"), + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 6), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 1, + .dead_code_mask = 0x01, +}, + +{ + .descr = "line_info (dead end)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0/* dead jmp */\0return a + b;\0/* dead exit */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 0, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 12), + BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 11), + BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 9), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 5, 8), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 6, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 2, + .dead_code_mask = 0x28, +}, + +{ + .descr = "line_info (dead code + subprog + func_info)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0/* dead jmp */" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 8), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + 
BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {14, 3} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(14, 0, NAME_TBD, 3, 8), + BPF_LINE_INFO_ENC(16, 0, NAME_TBD, 4, 7), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 9, + .dead_code_mask = 0x3fe, +}, + +{ + .descr = "line_info (dead subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */" + "\0return 0;\0return 0;\0/* dead */\0/* dead */" + "\0/* dead */\0return bla + 1;\0return bla + 1;" + "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {6, 3}, {9, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 3, + .dead_code_mask = 0x70, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead last subprog)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0int a=1+1;\0/* live call */" + "\0return 0;\0/* dead */\0/* dead */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {5, 3} }, + 
.line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 1, + .dead_code_cnt = 2, + .dead_code_mask = 0x18, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead subprog + dead start)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* dead */" + "\0return 0;\0return 0;\0return 0;" + "\0/* dead */\0/* dead */\0/* dead */\0/* dead */" + "\0return b + 1;\0return b + 1;\0return b + 1;"), + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {7, 3}, {10, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(10, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BPF_LINE_INFO_ENC(13, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 5, + .dead_code_mask = 0x1e2, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead subprog + dead start w/ move)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [5] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0dead\0main\0func\0int a=1+1;\0/* live call */" + "\0return 0;\0return 0;\0/* dead */\0/* dead */" + "\0/* dead */\0return bla + 1;\0return bla + 1;" + "\0return bla + 1;\0return func(a);\0b+=1;\0return b;"), + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_CALL_REL(3), + BPF_CALL_REL(5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_CALL_REL(1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 3, + .func_info_rec_size = 8, + .func_info = { {0, 4}, {6, 3}, {9, 5} }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(4, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 
1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(8, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(9, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(11, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(12, 0, NAME_TBD, 2, 9), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, + .dead_code_cnt = 3, + .dead_code_mask = 0x70, + .dead_func_cnt = 1, + .dead_func_mask = 0x2, +}, + +{ + .descr = "line_info (dead end + subprog start w/ no linfo)", + .raw_types = { + BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_FUNC_PROTO_ENC(1, 1), /* [2] */ + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ + BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0int\0x\0main\0func\0/* main linfo */\0/* func linfo */"), + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 1, 3), + BPF_CALL_REL(3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .func_info_cnt = 2, + .func_info_rec_size = 8, + .func_info = { {0, 3}, {6, 4}, }, + .line_info = { + BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), + BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 1, 10), + BTF_END_RAW, + }, + .line_info_rec_size = sizeof(struct bpf_line_info), + .nr_jited_ksyms = 2, +}, + }; static size_t probe_prog_length(const struct bpf_insn *fp) @@ -4778,6 +5145,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test, struct bpf_func_info *finfo; __u32 info_len, rec_size, i; void *func_info = NULL; + __u32 nr_func_info; int err; /* get necessary lens */ @@ -4787,7 +5155,8 @@ static int test_get_finfo(const struct prog_info_raw_test *test, fprintf(stderr, "%s\n", btf_log_buf); return -1; } - if (CHECK(info.nr_func_info != test->func_info_cnt, + nr_func_info = test->func_info_cnt - test->dead_func_cnt; + if (CHECK(info.nr_func_info != nr_func_info, "incorrect info.nr_func_info (1st) %d", info.nr_func_info)) { return -1; @@ -4808,7 +5177,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test, /* reset info to only retrieve func_info related data */ memset(&info, 0, sizeof(info)); - info.nr_func_info = test->func_info_cnt; + info.nr_func_info = nr_func_info; info.func_info_rec_size = rec_size; info.func_info = ptr_to_u64(func_info); err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); @@ -4817,7 +5186,7 @@ static int test_get_finfo(const struct prog_info_raw_test *test, err = -1; goto done; } - if (CHECK(info.nr_func_info != test->func_info_cnt, + if (CHECK(info.nr_func_info != nr_func_info, "incorrect info.nr_func_info (2nd) %d", info.nr_func_info)) { err = -1; @@ -4831,7 +5200,9 @@ static int test_get_finfo(const struct prog_info_raw_test *test, } finfo = func_info; - for (i = 0; i < test->func_info_cnt; i++) { + for (i = 0; i < nr_func_info; i++) { + if (test->dead_func_mask & (1 << i)) + continue; if (CHECK(finfo->type_id != test->func_info[i][1], "incorrect func_type %u expected %u", finfo->type_id, test->func_info[i][1])) { @@ -4860,6 +5231,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test, struct bpf_prog_info info = {}; __u32 *jited_func_lens = NULL; __u64 cur_func_ksyms; + __u32 dead_insns; int err; jited_cnt = cnt; @@ -4868,7 +5240,7 @@ static int test_get_linfo(const struct prog_info_raw_test *test, if (test->nr_jited_ksyms) nr_jited_ksyms = test->nr_jited_ksyms; else - nr_jited_ksyms = test->func_info_cnt; + 
nr_jited_ksyms = test->func_info_cnt - test->dead_func_cnt; nr_jited_func_lens = nr_jited_ksyms; info_len = sizeof(struct bpf_prog_info); @@ -4970,12 +5342,20 @@ static int test_get_linfo(const struct prog_info_raw_test *test, goto done; } + dead_insns = 0; + while (test->dead_code_mask & (1 << dead_insns)) + dead_insns++; + CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u", linfo[0].insn_off); for (i = 1; i < cnt; i++) { const struct bpf_line_info *expected_linfo; - expected_linfo = patched_linfo + (i * test->line_info_rec_size); + while (test->dead_code_mask & (1 << (i + dead_insns))) + dead_insns++; + + expected_linfo = patched_linfo + + ((i + dead_insns) * test->line_info_rec_size); if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off, "linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u", i, linfo[i].insn_off, @@ -5133,7 +5513,9 @@ static int do_test_info_raw(unsigned int test_num) if (err) goto done; - err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd); + err = test_get_linfo(test, patched_linfo, + attr.line_info_cnt - test->dead_code_cnt, + prog_fd); if (err) goto done; diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 2fd90d456892..4f67afeec8d9 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -15599,6 +15599,166 @@ static struct bpf_test tests[] = { .result_unpriv = ACCEPT, .result = ACCEPT, }, + { + "dead code: start", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: mid 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: mid 2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 1, + }, + { + "dead code: end 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: end 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: end 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_JMP_IMM(BPF_JA, 0, 0, -5), + }, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: tail of main + func", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: tail of main + two functions", + .insns = { + 
BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: function in the middle and mid of another func", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, + }, + { + "dead code: middle of main before call", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 2, + }, + { + "dead code: start of a function", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 2, + }, }; static int probe_filter_length(const struct bpf_insn *fp) From 9e4c24e7ee7dfd3898269519103e823892b730d8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:23 -0800 Subject: [PATCH 32/68] bpf: verifier: record original instruction index The communication between the verifier and advanced JITs is based on instruction indexes. We have to keep them stable throughout the optimizations otherwise referring to a particular instruction gets messy quickly. 
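(Illustrative aside, not part of the patch: the sketch below shows why a stable index is needed. Once instructions are removed, an offset into the live program no longer names the instruction the JIT saw at prepare time; carrying the original index alongside each slot, as orig_idx does in bpf_insn_aux_data, restores the mapping. The array and numbers here are made up.)

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* one slot per live instruction, carrying its original index */
	unsigned int orig_idx[] = { 0, 1, 2, 3, 4 };
	unsigned int len = 5;

	/* the verifier removes two dead instructions at offset 1; the
	 * aux data (and with it orig_idx) is compacted the same way */
	memmove(&orig_idx[1], &orig_idx[3], sizeof(orig_idx[0]) * (len - 3));
	len -= 2;

	/* live offset 1 now maps back to original instruction 3 */
	printf("live offset 1 -> original index %u\n", orig_idx[1]);
	return 0;
}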
Signed-off-by: Jakub Kicinski
Reviewed-by: Quentin Monnet
Signed-off-by: Alexei Starovoitov
---
 include/linux/bpf_verifier.h | 1 +
 kernel/bpf/verifier.c        | 8 +++++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 573cca00a0e6..f3ae00ee5516 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -187,6 +187,7 @@ struct bpf_insn_aux_data {
 	int sanitize_stack_off; /* stack slot to be cleared */
 	bool seen; /* this insn was processed by the verifier */
 	u8 alu_state; /* used in combination with alu_limit */
+	unsigned int orig_idx; /* original instruction index */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f39bca188a5c..f2c49b4235df 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -7371,7 +7371,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 {
 	struct bpf_verifier_env *env;
 	struct bpf_verifier_log *log;
-	int ret = -EINVAL;
+	int i, len, ret = -EINVAL;
 	bool is_priv;
 
 	/* no program is valid */
@@ -7386,12 +7386,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
 		return -ENOMEM;
 	log = &env->log;
 
+	len = (*prog)->len;
 	env->insn_aux_data =
-		vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
-				   (*prog)->len));
+		vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
 	ret = -ENOMEM;
 	if (!env->insn_aux_data)
 		goto err_free_env;
+	for (i = 0; i < len; i++)
+		env->insn_aux_data[i].orig_idx = i;
 	env->prog = *prog;
 	env->ops = bpf_verifier_ops[env->prog->type];

From 08ca90afba255d05dc3253caa44056e7aecbe8c5 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:24 -0800
Subject: [PATCH 33/68] bpf: notify offload JITs about optimizations

Let offload JITs know when instructions are replaced and optimized out,
so they can update their state appropriately.

The optimizations are best effort; if the JIT returns an error from any
callback, the verifier stops notifying it, as its state may now be out
of sync, but verification itself continues making progress.
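(Illustrative aside, not part of the patch: a toy model of that best-effort contract. struct toy_offload, failing_cb(), and notify_remove() are invented names; the latch mirrors how opt_failed is set in the bpf_prog_offload_remove_insns()/..._replace_insn() helpers below.)

#include <stdbool.h>
#include <stdio.h>

/* After the first callback failure the core latches opt_failed and
 * never calls the JIT again, but verification itself keeps going. */
struct toy_offload {
	bool opt_failed;
	int (*remove_insns)(unsigned int off, unsigned int cnt);
};

static int failing_cb(unsigned int off, unsigned int cnt)
{
	(void)off;
	(void)cnt;
	return -1;	/* JIT could not update its state */
}

static void notify_remove(struct toy_offload *ol, unsigned int off,
			  unsigned int cnt)
{
	int ret = -1;	/* missing callback counts as failure, too */

	if (!ol->opt_failed && ol->remove_insns)
		ret = ol->remove_insns(off, cnt);
	ol->opt_failed |= ret;	/* any nonzero return latches the flag */
}

int main(void)
{
	struct toy_offload ol = { .remove_insns = failing_cb };

	notify_remove(&ol, 0, 1);
	notify_remove(&ol, 3, 2);	/* skipped: opt_failed already set */
	printf("opt_failed=%d\n", ol.opt_failed);
	return 0;
}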
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 7 +++++++ include/linux/bpf_verifier.h | 5 +++++ kernel/bpf/offload.c | 35 +++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 6 ++++++ 4 files changed, 53 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e734f163bd0b..3851529062ec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -268,9 +268,15 @@ struct bpf_verifier_ops { }; struct bpf_prog_offload_ops { + /* verifier basic callbacks */ int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int (*finalize)(struct bpf_verifier_env *env); + /* verifier optimization callbacks (called after .finalize) */ + int (*replace_insn)(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); + int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); + /* program management callbacks */ int (*prepare)(struct bpf_prog *prog); int (*translate)(struct bpf_prog *prog); void (*destroy)(struct bpf_prog *prog); @@ -283,6 +289,7 @@ struct bpf_prog_offload { void *dev_priv; struct list_head offloads; bool dev_state; + bool opt_failed; void *jited_image; u32 jited_len; }; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f3ae00ee5516..0620e418dde5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -266,5 +266,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int bpf_prog_offload_finalize(struct bpf_verifier_env *env); +void +bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); +void +bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 54cf2b9c44a4..39dba8c90331 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -173,6 +173,41 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env) return ret; } +void +bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn) +{ + const struct bpf_prog_offload_ops *ops; + struct bpf_prog_offload *offload; + int ret = -EOPNOTSUPP; + + down_read(&bpf_devs_lock); + offload = env->prog->aux->offload; + if (offload) { + ops = offload->offdev->ops; + if (!offload->opt_failed && ops->replace_insn) + ret = ops->replace_insn(env, off, insn); + offload->opt_failed |= ret; + } + up_read(&bpf_devs_lock); +} + +void +bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) +{ + struct bpf_prog_offload *offload; + int ret = -EOPNOTSUPP; + + down_read(&bpf_devs_lock); + offload = env->prog->aux->offload; + if (offload) { + if (!offload->opt_failed && offload->offdev->ops->remove_insns) + ret = offload->offdev->ops->remove_insns(env, off, cnt); + offload->opt_failed |= ret; + } + up_read(&bpf_devs_lock); +} + static void __bpf_prog_offload_destroy(struct bpf_prog *prog) { struct bpf_prog_offload *offload = prog->aux->offload; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f2c49b4235df..8cfe39ef770f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6558,6 +6558,9 @@ static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) unsigned int orig_prog_len = env->prog->len; int err; + if (bpf_prog_is_dev_bound(env->prog->aux)) + bpf_prog_offload_remove_insns(env, off, cnt); + err = 
bpf_remove_insns(env->prog, off, cnt);
 	if (err)
 		return err;
@@ -6632,6 +6635,9 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
 		else
 			continue;
 
+		if (bpf_prog_is_dev_bound(env->prog->aux))
+			bpf_prog_offload_replace_insn(env, i, &ja);
+
 		memcpy(insn, &ja, sizeof(ja));
 	}
 }

From e90287f3aaf666c9e18e60f889f82ecfb0ee3c5d Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:25 -0800
Subject: [PATCH 34/68] nfp: bpf: don't use instruction number for jump target

Instruction number is meaningless at the code gen phase. The target of
the instruction is overwritten by nfp_fixup_branches(). The convention
is to put the raw offset in the target address as a placeholder. See
the cmp_* functions.

Signed-off-by: Jakub Kicinski
Reviewed-by: Quentin Monnet
Signed-off-by: Alexei Starovoitov
---
 drivers/net/ethernet/netronome/nfp/bpf/jit.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index e23ca90289f7..323587dee498 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3182,7 +3182,7 @@ bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 		wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
 	} else {
 		ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
-		emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1);
+		emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
 		offset_br = nfp_prog_current_offset(nfp_prog);
 	}
 	wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);

From 91a87a5823f798f1c7b26783e0d2c6d22a1dc28c Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 22 Jan 2019 22:45:26 -0800
Subject: [PATCH 35/68] nfp: bpf: split up the skip flag

We fail program loading if a jump lands on a skipped instruction. This
is for historical reasons: it used to be that we only skipped
instructions optimized out based on prior context, and therefore the
optimization would be buggy if we jumped directly to such an
instruction (because the context would be skipped by the jump).

There are cases where instructions can be skipped without any context;
for example, there is no point in generating code for:

  r0 |= 0

We will also soon support dropping dead code, so make the skip logic
differentiate between "optimized with preceding context" vs other skip
types.
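(Illustrative aside, not part of the patch: a minimal sketch of a "noop even on its own" test such as the r0 |= 0 case above. The encoding constants are the standard eBPF ones; struct toy_bpf_insn and the predicate name are made up and deliberately simpler than the real nfp classification.)

#include <stdbool.h>
#include <stdint.h>

/* standard eBPF opcode encodings */
#define BPF_ALU64	0x07
#define BPF_K		0x00
#define BPF_OR		0x40
#define BPF_CLASS(code)	((code) & 0x07)
#define BPF_SRC(code)	((code) & 0x08)
#define BPF_OP(code)	((code) & 0xf0)

struct toy_bpf_insn {
	uint8_t code;
	int32_t imm;
};

/* r |= 0 never changes the register, so no code needs to be generated
 * for it, regardless of what precedes it - a SKIP_NOOP candidate. */
static bool is_or_zero_noop(const struct toy_bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_ALU64 &&
	       BPF_OP(insn->code) == BPF_OR &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->imm == 0;
}

int main(void)
{
	struct toy_bpf_insn or0 = { .code = BPF_ALU64 | BPF_OR | BPF_K };

	return !is_or_zero_noop(&or0);	/* exit 0: recognized as noop */
}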
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 36 ++++++++++--------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 9 +++-- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 323587dee498..054382b9cbe6 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u64 imm = insn->imm; /* sign extend */ if (skip) { - meta->skip = true; + meta->flags |= FLAG_INSN_SKIP_NOOP; return 0; } @@ -1296,7 +1296,7 @@ wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, const struct bpf_insn *insn = &meta->insn; if (skip) { - meta->skip = true; + meta->flags |= FLAG_INSN_SKIP_NOOP; return 0; } @@ -3395,7 +3395,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) int err; list_for_each_entry(meta, &nfp_prog->insns, l) { - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; if (BPF_CLASS(meta->insn.code) != BPF_JMP) continue; @@ -3439,7 +3439,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) jmp_dst = meta->jmp_dst; - if (jmp_dst->skip) { + if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) { pr_err("Branch landing on removed instruction!!\n"); return -ELOOP; } @@ -3689,7 +3689,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog) return nfp_prog->error; } - if (meta->skip) { + if (meta->flags & FLAG_INSN_SKIP_MASK) { nfp_prog->n_translated++; continue; } @@ -3737,10 +3737,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) /* Programs start with R6 = R1 but we ignore the skb pointer */ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn.src_reg == 1 && insn.dst_reg == 6) - meta->skip = true; + meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; /* Return as soon as something doesn't match */ - if (!meta->skip) + if (!(meta->flags & FLAG_INSN_SKIP_MASK)) return; } } @@ -3755,7 +3755,7 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) list_for_each_entry(meta, &nfp_prog->insns, l) { struct bpf_insn insn = meta->insn; - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; if (BPF_CLASS(insn.code) != BPF_ALU && @@ -3829,7 +3829,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) if (meta2->flags & FLAG_INSN_IS_JUMP_DST) continue; - meta2->skip = true; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } } @@ -3869,8 +3869,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) meta3->flags & FLAG_INSN_IS_JUMP_DST) continue; - meta2->skip = true; - meta3->skip = true; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; + meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } } @@ -4065,7 +4065,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) } head_ld_meta->paired_st = &head_st_meta->insn; - head_st_meta->skip = true; + head_st_meta->flags |= + FLAG_INSN_SKIP_PREC_DEPENDENT; } else { head_ld_meta->ldst_gather_len = 0; } @@ -4098,8 +4099,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) head_ld_meta = meta1; head_st_meta = meta2; } else { - meta1->skip = true; - meta2->skip = true; + meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); @@ -4124,7 +4125,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) if (meta->flags 
& FLAG_INSN_IS_JUMP_DST) cache_avail = false; - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; insn = &meta->insn; @@ -4210,7 +4211,7 @@ start_new: } list_for_each_entry(meta, &nfp_prog->insns, l) { - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { @@ -4246,7 +4247,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) u32 id; nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { - if (meta1->skip || meta2->skip) + if (meta1->flags & FLAG_INSN_SKIP_MASK || + meta2->flags & FLAG_INSN_SKIP_MASK) continue; if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 941277936475..40291aedd895 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -243,6 +243,13 @@ struct nfp_bpf_reg_state { #define FLAG_INSN_IS_JUMP_DST BIT(0) #define FLAG_INSN_IS_SUBPROG_START BIT(1) #define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2) +/* Instruction is pointless, noop even on its own */ +#define FLAG_INSN_SKIP_NOOP BIT(3) +/* Instruction is optimized out based on preceding instructions */ +#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4) + +#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \ + FLAG_INSN_SKIP_PREC_DEPENDENT) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -271,7 +278,6 @@ struct nfp_bpf_reg_state { * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags * @subprog_idx: index of subprogram to which the instruction belongs - * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list */ @@ -319,7 +325,6 @@ struct nfp_insn_meta { unsigned short n; unsigned short flags; unsigned short subprog_idx; - bool skip; instr_cb_t double_cb; struct list_head l; From e2fc61146aa6dea81f3231530c130be72af05c5a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:27 -0800 Subject: [PATCH 36/68] nfp: bpf: save original program length Instead of passing env->prog->len around, and trying to adjust for optimized out instructions just save the initial number of instructions in struct nfp_prog. 
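(Illustrative aside, not part of the patch: a toy illustration of the bookkeeping hazard this avoids. The driver builds one meta entry per original instruction, and dead code elimination later shrinks env->prog->len, so bounds checks against the meta list should use the count captured once at prepare time; struct toy_prog and the numbers are made up.)

#include <stdio.h>

/* one meta record per *original* instruction; built once, never shrunk */
struct toy_prog {
	unsigned int n_insns;	/* original length, cached at prepare time */
};

static int meta_in_bounds(const struct toy_prog *p, unsigned int idx)
{
	return idx < p->n_insns;	/* NOT the current kernel prog->len */
}

int main(void)
{
	struct toy_prog nfp = { .n_insns = 10 };
	unsigned int kernel_len = 10;

	kernel_len -= 3;	/* verifier removed dead instructions */

	printf("meta list: %u entries, live prog: %u insns, idx 8 valid: %d\n",
	       nfp.n_insns, kernel_len, meta_in_bounds(&nfp, 8));
	return 0;
}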
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 4 ++-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 6 ++++-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3 ++- .../net/ethernet/netronome/nfp/bpf/verifier.c | 16 +++++++--------- 4 files changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 054382b9cbe6..3308fd92c017 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -4327,7 +4327,7 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog) return ret; } -void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) +void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; @@ -4355,7 +4355,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) else dst_idx = meta->n + 1 + meta->insn.off; - dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt); + dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx); if (pseudo_call) dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 40291aedd895..07879eee3d46 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -462,6 +462,7 @@ struct nfp_bpf_subprog_info { * @subprog_cnt: number of sub-programs, including main function * @map_records: the map record pointers from bpf->maps_neutral * @subprog: pointer to an array of objects holding info about sub-programs + * @n_insns: number of instructions on @insns list * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -494,6 +495,7 @@ struct nfp_prog { struct nfp_bpf_neutral_map **map_records; struct nfp_bpf_subprog_info *subprog; + unsigned int n_insns; struct list_head insns; }; @@ -510,7 +512,7 @@ struct nfp_bpf_vnic { }; bool nfp_is_subprog_start(struct nfp_insn_meta *meta); -void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); +void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); @@ -531,7 +533,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int insn_idx, unsigned int n_insns); + unsigned int insn_idx); void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index f0283854fade..c10aab392cf6 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -163,8 +163,9 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, list_add_tail(&meta->l, &nfp_prog->insns); } + nfp_prog->n_insns = cnt; - nfp_bpf_jit_prepare(nfp_prog, cnt); + nfp_bpf_jit_prepare(nfp_prog); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 337bb862ec1d..2712ab17d57c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -18,15 +18,15 @@ struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int 
insn_idx, unsigned int n_insns) + unsigned int insn_idx) { unsigned int forward, backward, i; backward = meta->n - insn_idx; forward = insn_idx - meta->n; - if (min(forward, backward) > n_insns - insn_idx - 1) { - backward = n_insns - insn_idx - 1; + if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) { + backward = nfp_prog->n_insns - insn_idx - 1; meta = nfp_prog_last_meta(nfp_prog); } if (min(forward, backward) > insn_idx && backward > insn_idx) { @@ -629,7 +629,7 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; struct nfp_insn_meta *meta = nfp_prog->verifier_meta; - meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len); + meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx); nfp_prog->verifier_meta = meta; if (!nfp_bpf_supported_opcode(meta->insn.code)) { @@ -690,8 +690,7 @@ nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env, return 0; } -static unsigned int -nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) +static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); unsigned int max_depth = 0, depth = 0, frame = 0; @@ -726,7 +725,7 @@ continue_subprog: /* Find the callee and start processing it. */ meta = nfp_bpf_goto_meta(nfp_prog, meta, - meta->n + 1 + meta->insn.imm, cnt); + meta->n + 1 + meta->insn.imm); idx = meta->subprog_idx; frame++; goto process_subprog; @@ -778,8 +777,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env) nn = netdev_priv(env->prog->aux->offload->netdev); max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog, - env->prog->len); + nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog); if (nfp_prog->stack_size > max_stack) { pr_vlog(env, "stack too large: program %dB > FW stack %dB\n", nfp_prog->stack_size, max_stack); From a32014b351662fd67c7e0f807855f527d0834d00 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:28 -0800 Subject: [PATCH 37/68] nfp: bpf: support optimizing dead branches Verifier will now optimize out branches to dead code, implement the replace_insn callback to take advantage of that optimization. 
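(Illustrative aside, not part of the patch: the only rewrite the new callback has to recognize is a conditional jump hard-wired into an unconditional BPF_JA with an unchanged target. The sketch below checks exactly that, with made-up toy types; the real nfp_bpf_opt_replace_insn() additionally resolves targets through orig_idx and meta->jmp_dst.)

#include <stdbool.h>
#include <stdint.h>

/* standard eBPF opcode encodings */
#define BPF_JMP	0x05
#define BPF_JA	0x00
#define BPF_K	0x00

struct toy_insn {
	uint8_t code;
	int16_t off;
};

/* Accept only "conditional jump became BPF_JA" with the same target:
 * a BPF_JA at insn_off jumps to insn_off + off + 1. */
static bool replacement_ok(uint32_t insn_off, const struct toy_insn *insn,
			   uint32_t old_tgt)
{
	if (insn->code != (BPF_JMP | BPF_JA | BPF_K))
		return false;	/* any other rewrite is unsupported */
	return insn_off + insn->off + 1 == old_tgt;
}

int main(void)
{
	struct toy_insn ja = { .code = BPF_JMP | BPF_JA | BPF_K, .off = 4 };

	return !replacement_ok(10, &ja, 15);	/* 10 + 4 + 1 == 15: ok */
}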
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/main.h | 14 ++++++++ .../net/ethernet/netronome/nfp/bpf/offload.c | 1 + .../net/ethernet/netronome/nfp/bpf/verifier.c | 34 +++++++++++++++++++ 3 files changed, 49 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 07879eee3d46..a33aa7df1979 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -412,6 +412,17 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } +static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta) +{ + u8 op; + + if (BPF_CLASS(meta->insn.code) != BPF_JMP) + return false; + + op = BPF_OP(meta->insn.code); + return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; +} + static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) { struct bpf_insn insn = meta->insn; @@ -520,6 +531,9 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int nfp_bpf_finalize(struct bpf_verifier_env *env); +int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); + extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops; struct netdev_bpf; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index c10aab392cf6..877c1b8f95e2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -592,6 +592,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, const struct bpf_prog_offload_ops nfp_bpf_dev_ops = { .insn_hook = nfp_verify_insn, .finalize = nfp_bpf_finalize, + .replace_insn = nfp_bpf_opt_replace_insn, .prepare = nfp_bpf_verifier_prep, .translate = nfp_bpf_translate, .destroy = nfp_bpf_destroy, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 2712ab17d57c..32468e1b1b73 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -786,3 +786,37 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env) return 0; } + +int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn) +{ + struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct nfp_insn_meta *meta = nfp_prog->verifier_meta; + + meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); + nfp_prog->verifier_meta = meta; + + /* conditional jump to jump conversion */ + if (is_mbpf_cond_jump(meta) && + insn->code == (BPF_JMP | BPF_JA | BPF_K)) { + unsigned int tgt_off; + + tgt_off = off + insn->off + 1; + + if (!insn->off) { + meta->jmp_dst = list_next_entry(meta, l); + meta->jump_neg_op = false; + } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) { + pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n", + off, meta->jmp_dst->n, + aux_data[tgt_off].orig_idx); + return -EINVAL; + } + return 0; + } + + pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n", + meta->insn.code, insn->code); + return -EINVAL; +} From 9a06927e778bc4e805acc8fa03573bbf7e597cc5 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 22 Jan 2019 22:45:29 -0800 Subject: [PATCH 38/68] nfp: bpf: support removing dead code Add a 
verifier callback to the nfp JIT to remove the instructions the verifier deemed to be dead. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/main.h | 6 ++++- .../net/ethernet/netronome/nfp/bpf/offload.c | 5 ++++ .../net/ethernet/netronome/nfp/bpf/verifier.c | 24 +++++++++++++++++++ 3 files changed, 34 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index a33aa7df1979..5813c3e13ebe 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -247,9 +247,12 @@ struct nfp_bpf_reg_state { #define FLAG_INSN_SKIP_NOOP BIT(3) /* Instruction is optimized out based on preceding instructions */ #define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4) +/* Instruction is optimized by the verifier */ +#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5) #define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \ - FLAG_INSN_SKIP_PREC_DEPENDENT) + FLAG_INSN_SKIP_PREC_DEPENDENT | \ + FLAG_INSN_SKIP_VERIFIER_OPT) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -533,6 +536,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env); int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, struct bpf_insn *insn); +int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 877c1b8f95e2..55c7dbf8b421 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -220,6 +220,10 @@ static int nfp_bpf_translate(struct bpf_prog *prog) unsigned int max_instr; int err; + /* We depend on dead code elimination succeeding */ + if (prog->aux->offload->opt_failed) + return -EINVAL; + max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); @@ -593,6 +597,7 @@ const struct bpf_prog_offload_ops nfp_bpf_dev_ops = { .insn_hook = nfp_verify_insn, .finalize = nfp_bpf_finalize, .replace_insn = nfp_bpf_opt_replace_insn, + .remove_insns = nfp_bpf_opt_remove_insns, .prepare = nfp_bpf_verifier_prep, .translate = nfp_bpf_translate, .destroy = nfp_bpf_destroy, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 32468e1b1b73..36f56eb4cbe2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -820,3 +820,27 @@ int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, meta->insn.code, insn->code); return -EINVAL; } + +int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) +{ + struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct nfp_insn_meta *meta = nfp_prog->verifier_meta; + unsigned int i; + + meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); + + for (i = 0; i < cnt; i++) { + if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns)) + return -EINVAL; + + /* doesn't count if it already has the flag */ + if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT) + i--; + + meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT; + meta = list_next_entry(meta, l); + } + + return 0; +} From 866e6ac47409f0bd12398b777fef61fb51ce3680 Mon Sep 17 00:00:00 2001 From: Prashant Bhole Date: Thu, 24 Jan 2019 11:51:57 
+0900 Subject: [PATCH 39/68] bpftool: feature probing, change default action When 'bpftool feature' is executed it shows incorrect help string. test# bpftool feature Usage: bpftool bpftool probe [COMPONENT] [macros [prefix PREFIX]] bpftool bpftool help COMPONENT := { kernel | dev NAME } Instead of fixing the help text by tweaking argv[] indices, this patch changes the default action to 'probe'. It makes the behavior consistent with other subcommands, where first subcommand without extra parameter results in 'show' action. Signed-off-by: Prashant Bhole Reviewed-by: Quentin Monnet Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/feature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 993c6f1e5473..d672d9086fff 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -753,8 +753,8 @@ static int do_help(int argc, char **argv) } static const struct cmd cmds[] = { - { "help", do_help }, { "probe", do_probe }, + { "help", do_help }, { 0 } }; From d9ff286a0f59fa7843549e49bd240393dd7d8b87 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 23 Jan 2019 09:22:27 -0800 Subject: [PATCH 40/68] bpf: allow BPF programs access skb_shared_info->gso_segs field This adds the ability to read gso_segs from a BPF program. v3: Use BPF_REG_AX instead of BPF_REG_TMP for the temporary register, as suggested by Martin. v2: refined Eddie Hao patch to address Alexei feedback. Signed-off-by: Eric Dumazet Cc: Eddie Hao Cc: Martin KaFai Lau Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/uapi/linux/bpf.h | 1 + net/core/filter.c | 21 ++++++++++++ tools/include/uapi/linux/bpf.h | 1 + tools/testing/selftests/bpf/test_verifier.c | 36 +++++++++++++++++++++ 4 files changed, 59 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 91c43884f295..2940a9854f6d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2540,6 +2540,7 @@ struct __sk_buff { __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; __u32 wire_len; + __u32 gso_segs; }; struct bpf_tunnel_key { diff --git a/net/core/filter.c b/net/core/filter.c index 2b3b436ef545..8e587dd1da20 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6700,6 +6700,27 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, target_size)); break; + case offsetof(struct __sk_buff, gso_segs): + /* si->dst_reg = skb_shinfo(SKB); */ +#ifdef NET_SKBUFF_DATA_USES_OFFSET + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, head)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), + BPF_REG_AX, si->src_reg, + offsetof(struct sk_buff, end)); + *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); +#else + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, end)); +#endif + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), + si->dst_reg, si->dst_reg, + bpf_target_off(struct skb_shared_info, + gso_segs, 2, + target_size)); + break; case offsetof(struct __sk_buff, wire_len): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 91c43884f295..2940a9854f6d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2540,6 +2540,7 @@ struct __sk_buff { __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); __u64 tstamp; 
__u32 wire_len; + __u32 gso_segs; }; struct bpf_tunnel_key { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 4f67afeec8d9..e4fef6ca8071 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -5663,6 +5663,42 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_CGROUP_SKB, }, + { + "read gso_segs from CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "write gso_segs from CGROUP_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .result_unpriv = REJECT, + .errstr = "invalid bpf_context access off=164 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + }, + { + "read gso_segs from CLS", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "multiple registers share map_lookup_elem result", .insns = { From 2f0921262ba943fe9d9f59037a033927d8c4789b Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 24 Jan 2019 08:54:29 -0800 Subject: [PATCH 41/68] selftests/bpf: suppress readelf stderr when probing for BTF support Before: $ make -s -C tools/testing/selftests/bpf readelf: Error: Missing knowledge of 32-bit reloc types used in DWARF sections of machine number 247 readelf: Warning: unable to apply unsupported reloc type 10 to section .debug_info readelf: Warning: unable to apply unsupported reloc type 1 to section .debug_info readelf: Warning: unable to apply unsupported reloc type 10 to section .debug_info After: $ make -s -C tools/testing/selftests/bpf v2: * use llvm-readelf instead of redirecting binutils' readelf stderr to /dev/null Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 70229de510f5..1cca5f2567c2 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -95,6 +95,7 @@ $(BPFOBJ): force CLANG ?= clang LLC ?= llc LLVM_OBJCOPY ?= llvm-objcopy +LLVM_READELF ?= llvm-readelf BTF_PAHOLE ?= pahole PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) @@ -131,7 +132,7 @@ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \ - readelf -S ./llvm_btf_verify.o | grep BTF; \ + $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \ /bin/rm -f ./llvm_btf_verify.o) ifneq ($(BTF_LLVM_PROBE),) From 1d0dc06930a917eaca4156193c6c49f798b95ce7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Thu, 24 Jan 2019 19:59:37 +0100 Subject: [PATCH 42/68] net: xsk: track AF_XDP sockets on a per-netns list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Track each AF_XDP socket in a per-netns list. 
This will be used later by the sock_diag interface for querying sockets from userspace. Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann --- include/net/net_namespace.h | 4 ++++ include/net/netns/xdp.h | 13 +++++++++++++ net/xdp/xsk.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 include/net/netns/xdp.h diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 99d4148e0f90..a68ced28d8f4 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -160,6 +161,9 @@ struct net { #endif #if IS_ENABLED(CONFIG_CAN) struct netns_can can; +#endif +#ifdef CONFIG_XDP_SOCKETS + struct netns_xdp xdp; #endif struct sock *diag_nlsk; atomic_t fnhe_genid; diff --git a/include/net/netns/xdp.h b/include/net/netns/xdp.h new file mode 100644 index 000000000000..e5734261ba0a --- /dev/null +++ b/include/net/netns/xdp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETNS_XDP_H__ +#define __NETNS_XDP_H__ + +#include +#include + +struct netns_xdp { + struct mutex lock; + struct hlist_head list; +}; + +#endif /* __NETNS_XDP_H__ */ diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index a03268454a27..80ca48cefc42 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -350,6 +350,10 @@ static int xsk_release(struct socket *sock) net = sock_net(sk); + mutex_lock(&net->xdp.lock); + sk_del_node_init_rcu(sk); + mutex_unlock(&net->xdp.lock); + local_bh_disable(); sock_prot_inuse_add(net, sk->sk_prot, -1); local_bh_enable(); @@ -746,6 +750,10 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, mutex_init(&xs->mutex); spin_lock_init(&xs->tx_completion_lock); + mutex_lock(&net->xdp.lock); + sk_add_node_rcu(sk, &net->xdp.list); + mutex_unlock(&net->xdp.lock); + local_bh_disable(); sock_prot_inuse_add(net, &xsk_proto, 1); local_bh_enable(); @@ -759,6 +767,23 @@ static const struct net_proto_family xsk_family_ops = { .owner = THIS_MODULE, }; +static int __net_init xsk_net_init(struct net *net) +{ + mutex_init(&net->xdp.lock); + INIT_HLIST_HEAD(&net->xdp.list); + return 0; +} + +static void __net_exit xsk_net_exit(struct net *net) +{ + WARN_ON_ONCE(!hlist_empty(&net->xdp.list)); +} + +static struct pernet_operations xsk_net_ops = { + .init = xsk_net_init, + .exit = xsk_net_exit, +}; + static int __init xsk_init(void) { int err; @@ -771,8 +796,13 @@ static int __init xsk_init(void) if (err) goto out_proto; + err = register_pernet_subsys(&xsk_net_ops); + if (err) + goto out_sk; return 0; +out_sk: + sock_unregister(PF_XDP); out_proto: proto_unregister(&xsk_proto); out: From 50e74c0131a5b3a3e387798a5705158c04fb3bd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Thu, 24 Jan 2019 19:59:38 +0100 Subject: [PATCH 43/68] xsk: add id to umem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit adds an id to the umem structure. The id uniquely identifies a umem instance, and will be exposed to user-space via the socket monitoring interface. 
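For illustration, a minimal sketch of the allocation pattern the xdp_umem.c hunk below follows (error handling trimmed; not the complete code):

    static DEFINE_IDA(umem_ida);

    /* on create: take the lowest free id, starting from 0 */
    err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
    if (err < 0) {
            kfree(umem);
            return ERR_PTR(err);
    }
    umem->id = err;

    /* on release: return the id so it can be reused */
    ida_simple_remove(&umem_ida, umem->id);

ida_simple_get() returns the allocated id on success, so the same return value doubles as the error code on failure, which is why the id is assigned from err.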
Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann --- include/net/xdp_sock.h | 1 + net/xdp/xdp_umem.c | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 13acb9803a6d..61cf7dbb6782 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -42,6 +42,7 @@ struct xdp_umem { struct work_struct work; struct page **pgs; u32 npgs; + int id; struct net_device *dev; struct xdp_umem_fq_reuse *fq_reuse; u16 queue_id; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index a264cf2accd0..eabdb0f59031 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -13,12 +13,15 @@ #include #include #include +#include #include "xdp_umem.h" #include "xsk_queue.h" #define XDP_UMEM_MIN_CHUNK_SIZE 2048 +static DEFINE_IDA(umem_ida); + void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs) { unsigned long flags; @@ -183,6 +186,8 @@ static void xdp_umem_release(struct xdp_umem *umem) xdp_umem_clear_dev(umem); + ida_simple_remove(&umem_ida, umem->id); + if (umem->fq) { xskq_destroy(umem->fq); umem->fq = NULL; @@ -389,8 +394,16 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr) if (!umem) return ERR_PTR(-ENOMEM); + err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL); + if (err < 0) { + kfree(umem); + return ERR_PTR(err); + } + umem->id = err; + err = xdp_umem_reg(umem, mr); if (err) { + ida_simple_remove(&umem_ida, umem->id); kfree(umem); return ERR_PTR(err); } From a36b38aa2af61146ea80980a01cf6e952ab021c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Thu, 24 Jan 2019 19:59:39 +0100 Subject: [PATCH 44/68] xsk: add sock_diag interface for AF_XDP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds the sock_diag interface for querying sockets from user space. Tools like iproute2 ss(8) can use this interface to list open AF_XDP sockets. The user-space ABI is defined in linux/xdp_diag.h and includes netlink request and response structs. The request can query sockets and the response contains socket information about the rings, umems, inode and more. Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann --- include/uapi/linux/xdp_diag.h | 72 +++++++++++++ net/xdp/Kconfig | 8 ++ net/xdp/Makefile | 1 + net/xdp/xsk.c | 6 +- net/xdp/xsk.h | 12 +++ net/xdp/xsk_diag.c | 191 ++++++++++++++++++++++++++++++++++ 6 files changed, 285 insertions(+), 5 deletions(-) create mode 100644 include/uapi/linux/xdp_diag.h create mode 100644 net/xdp/xsk.h create mode 100644 net/xdp/xsk_diag.c diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h new file mode 100644 index 000000000000..78b2591a7782 --- /dev/null +++ b/include/uapi/linux/xdp_diag.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * xdp_diag: interface for query/monitor XDP sockets + * Copyright(c) 2019 Intel Corporation. 
+ */ + +#ifndef _LINUX_XDP_DIAG_H +#define _LINUX_XDP_DIAG_H + +#include + +struct xdp_diag_req { + __u8 sdiag_family; + __u8 sdiag_protocol; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_show; + __u32 xdiag_cookie[2]; +}; + +struct xdp_diag_msg { + __u8 xdiag_family; + __u8 xdiag_type; + __u16 pad; + __u32 xdiag_ino; + __u32 xdiag_cookie[2]; +}; + +#define XDP_SHOW_INFO (1 << 0) /* Basic information */ +#define XDP_SHOW_RING_CFG (1 << 1) +#define XDP_SHOW_UMEM (1 << 2) +#define XDP_SHOW_MEMINFO (1 << 3) + +enum { + XDP_DIAG_NONE, + XDP_DIAG_INFO, + XDP_DIAG_UID, + XDP_DIAG_RX_RING, + XDP_DIAG_TX_RING, + XDP_DIAG_UMEM, + XDP_DIAG_UMEM_FILL_RING, + XDP_DIAG_UMEM_COMPLETION_RING, + XDP_DIAG_MEMINFO, + __XDP_DIAG_MAX, +}; + +#define XDP_DIAG_MAX (__XDP_DIAG_MAX - 1) + +struct xdp_diag_info { + __u32 ifindex; + __u32 queue_id; +}; + +struct xdp_diag_ring { + __u32 entries; /*num descs */ +}; + +#define XDP_DU_F_ZEROCOPY (1 << 0) + +struct xdp_diag_umem { + __u64 size; + __u32 id; + __u32 num_pages; + __u32 chunk_size; + __u32 headroom; + __u32 ifindex; + __u32 queue_id; + __u32 flags; + __u32 refs; +}; + +#endif /* _LINUX_XDP_DIAG_H */ diff --git a/net/xdp/Kconfig b/net/xdp/Kconfig index 90e4a7152854..0255b33cff4b 100644 --- a/net/xdp/Kconfig +++ b/net/xdp/Kconfig @@ -5,3 +5,11 @@ config XDP_SOCKETS help XDP sockets allows a channel between XDP programs and userspace applications. + +config XDP_SOCKETS_DIAG + tristate "XDP sockets: monitoring interface" + depends on XDP_SOCKETS + default n + help + Support for PF_XDP sockets monitoring interface used by the ss tool. + If unsure, say Y. diff --git a/net/xdp/Makefile b/net/xdp/Makefile index 04f073146256..59dbfdf93dca 100644 --- a/net/xdp/Makefile +++ b/net/xdp/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_XDP_SOCKETS) += xsk.o xdp_umem.o xsk_queue.o +obj-$(CONFIG_XDP_SOCKETS_DIAG) += xsk_diag.o diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 80ca48cefc42..949d3bbccb2f 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -27,14 +27,10 @@ #include "xsk_queue.h" #include "xdp_umem.h" +#include "xsk.h" #define TX_BATCH_SIZE 16 -static struct xdp_sock *xdp_sk(struct sock *sk) -{ - return (struct xdp_sock *)sk; -} - bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) { return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) && diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h new file mode 100644 index 000000000000..ba8120610426 --- /dev/null +++ b/net/xdp/xsk.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. */ + +#ifndef XSK_H_ +#define XSK_H_ + +static inline struct xdp_sock *xdp_sk(struct sock *sk) +{ + return (struct xdp_sock *)sk; +} + +#endif /* XSK_H_ */ diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c new file mode 100644 index 000000000000..661d007c3b28 --- /dev/null +++ b/net/xdp/xsk_diag.c @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: GPL-2.0 +/* XDP sockets monitoring support + * + * Copyright(c) 2019 Intel Corporation. + * + * Author: Björn Töpel + */ + +#include +#include +#include +#include + +#include "xsk_queue.h" +#include "xsk.h" + +static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb) +{ + struct xdp_diag_info di = {}; + + di.ifindex = xs->dev ? 
xs->dev->ifindex : 0; + di.queue_id = xs->queue_id; + return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di); +} + +static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type, + struct sk_buff *nlskb) +{ + struct xdp_diag_ring dr = {}; + + dr.entries = queue->nentries; + return nla_put(nlskb, nl_type, sizeof(dr), &dr); +} + +static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs, + struct sk_buff *nlskb) +{ + int err = 0; + + if (xs->rx) + err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb); + if (!err && xs->tx) + err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb); + return err; +} + +static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) +{ + struct xdp_umem *umem = xs->umem; + struct xdp_diag_umem du = {}; + int err; + + if (!umem) + return 0; + + du.id = umem->id; + du.size = umem->size; + du.num_pages = umem->npgs; + du.chunk_size = (__u32)(~umem->chunk_mask + 1); + du.headroom = umem->headroom; + du.ifindex = umem->dev ? umem->dev->ifindex : 0; + du.queue_id = umem->queue_id; + du.flags = 0; + if (umem->zc) + du.flags |= XDP_DU_F_ZEROCOPY; + du.refs = refcount_read(&umem->users); + + err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du); + + if (!err && umem->fq) + err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_FILL_RING, nlskb); + if (!err && umem->cq) { + err = xsk_diag_put_ring(xs->tx, XDP_DIAG_UMEM_COMPLETION_RING, + nlskb); + } + return err; +} + +static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, + struct xdp_diag_req *req, + struct user_namespace *user_ns, + u32 portid, u32 seq, u32 flags, int sk_ino) +{ + struct xdp_sock *xs = xdp_sk(sk); + struct xdp_diag_msg *msg; + struct nlmsghdr *nlh; + + nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg), + flags); + if (!nlh) + return -EMSGSIZE; + + msg = nlmsg_data(nlh); + memset(msg, 0, sizeof(*msg)); + msg->xdiag_family = AF_XDP; + msg->xdiag_type = sk->sk_type; + msg->xdiag_ino = sk_ino; + sock_diag_save_cookie(sk, msg->xdiag_cookie); + + if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb)) + goto out_nlmsg_trim; + + if ((req->xdiag_show & XDP_SHOW_INFO) && + nla_put_u32(nlskb, XDP_DIAG_UID, + from_kuid_munged(user_ns, sock_i_uid(sk)))) + goto out_nlmsg_trim; + + if ((req->xdiag_show & XDP_SHOW_RING_CFG) && + xsk_diag_put_rings_cfg(xs, nlskb)) + goto out_nlmsg_trim; + + if ((req->xdiag_show & XDP_SHOW_UMEM) && + xsk_diag_put_umem(xs, nlskb)) + goto out_nlmsg_trim; + + if ((req->xdiag_show & XDP_SHOW_MEMINFO) && + sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) + goto out_nlmsg_trim; + + nlmsg_end(nlskb, nlh); + return 0; + +out_nlmsg_trim: + nlmsg_cancel(nlskb, nlh); + return -EMSGSIZE; +} + +static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb) +{ + struct xdp_diag_req *req = nlmsg_data(cb->nlh); + struct net *net = sock_net(nlskb->sk); + int num = 0, s_num = cb->args[0]; + struct sock *sk; + + mutex_lock(&net->xdp.lock); + + sk_for_each(sk, &net->xdp.list) { + if (!net_eq(sock_net(sk), net)) + continue; + if (num++ < s_num) + continue; + + if (xsk_diag_fill(sk, nlskb, req, + sk_user_ns(NETLINK_CB(cb->skb).sk), + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, NLM_F_MULTI, + sock_i_ino(sk)) < 0) { + num--; + break; + } + } + + mutex_unlock(&net->xdp.lock); + cb->args[0] = num; + return nlskb->len; +} + +static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr) +{ + struct netlink_dump_control c = { .dump = xsk_diag_dump }; + int hdrlen = sizeof(struct xdp_diag_req); + struct net *net 
= sock_net(nlskb->sk); + + if (nlmsg_len(hdr) < hdrlen) + return -EINVAL; + + if (!(hdr->nlmsg_flags & NLM_F_DUMP)) + return -EOPNOTSUPP; + + return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c); +} + +static const struct sock_diag_handler xsk_diag_handler = { + .family = AF_XDP, + .dump = xsk_diag_handler_dump, +}; + +static int __init xsk_diag_init(void) +{ + return sock_diag_register(&xsk_diag_handler); +} + +static void __exit xsk_diag_exit(void) +{ + sock_diag_unregister(&xsk_diag_handler); +} + +module_init(xsk_diag_init); +module_exit(xsk_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP); From d405c7407a5468d4fc11724d76063e0647d80106 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:25:59 -0500 Subject: [PATCH 45/68] bpf: allocate 0x06 to new eBPF instruction class JMP32 The new eBPF instruction class JMP32 uses the reserved class number 0x6. Kernel BPF ISA documentation updated accordingly. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- Documentation/networking/filter.txt | 15 ++++++++------- include/uapi/linux/bpf.h | 1 + tools/include/uapi/linux/bpf.h | 1 + 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 2196b824e96c..01603bc2eff1 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -865,7 +865,7 @@ Three LSB bits store instruction class which is one of: BPF_STX 0x03 BPF_STX 0x03 BPF_ALU 0x04 BPF_ALU 0x04 BPF_JMP 0x05 BPF_JMP 0x05 - BPF_RET 0x06 [ class 6 unused, for future if needed ] + BPF_RET 0x06 BPF_JMP32 0x06 BPF_MISC 0x07 BPF_ALU64 0x07 When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ... @@ -902,9 +902,9 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of: BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */ BPF_END 0xd0 /* eBPF only: endianness conversion */ -If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: +If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of: - BPF_JA 0x00 + BPF_JA 0x00 /* BPF_JMP only */ BPF_JEQ 0x10 BPF_JGT 0x20 BPF_JGE 0x30 @@ -912,8 +912,8 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: BPF_JNE 0x50 /* eBPF only: jump != */ BPF_JSGT 0x60 /* eBPF only: signed '>' */ BPF_JSGE 0x70 /* eBPF only: signed '>=' */ - BPF_CALL 0x80 /* eBPF only: function call */ - BPF_EXIT 0x90 /* eBPF only: function return */ + BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */ + BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */ BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ BPF_JSLT 0xc0 /* eBPF only: signed '<' */ @@ -936,8 +936,9 @@ Classic BPF wastes the whole BPF_RET class to represent a single 'ret' operation. Classic BPF_RET | BPF_K means copy imm32 into return register and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT in eBPF means function exit only. The eBPF program needs to store return -value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently -unused and reserved for future use. +value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as +BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide +operands for the comparisons instead. 
For load and store instructions the 8-bit 'code' field is divided as: diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 2940a9854f6d..60b99b730a41 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -14,6 +14,7 @@ /* Extended instruction set based on top of classic BPF */ /* instruction classes */ +#define BPF_JMP32 0x06 /* jmp mode in word width */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 2940a9854f6d..60b99b730a41 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -14,6 +14,7 @@ /* Extended instruction set based on top of classic BPF */ /* instruction classes */ +#define BPF_JMP32 0x06 /* jmp mode in word width */ #define BPF_ALU64 0x07 /* alu mode in double word width */ /* ld/ldx fields */ From a72dafafbd5f11c6ea3a9682d64da1074f28eb67 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:00 -0500 Subject: [PATCH 46/68] bpf: refactor verifier min/max code for condition jump The current min/max code does both signed and unsigned comparisons against the input argument "val", which is "u64", with explicit type casting when the comparison is signed. As we will need slightly more complex type casting once JMP32 is introduced, it is better to hoist the signed type casting. This makes the code cleaner with negligible runtime overhead. Also, the code for J*GE/GT/LT/LE and JEQ/JNE is very similar, so this patch combines them. The main purpose of this refactor is to make sure the min/max code stays readable, with minimum code duplication, after JMP32 is introduced. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 172 ++++++++++++++++++++++++------------------ 1 file changed, 99 insertions(+), 73 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8cfe39ef770f..eae6cb1fe653 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4033,9 +4033,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, */ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) { + s64 sval; + if (__is_pointer_value(false, reg)) return -1; + sval = (s64)val; + switch (opcode) { case BPF_JEQ: if (tnum_is_const(reg->var_off)) @@ -4058,9 +4062,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) return 0; break; case BPF_JSGT: - if (reg->smin_value > (s64)val) + if (reg->smin_value > sval) return 1; - else if (reg->smax_value < (s64)val) + else if (reg->smax_value < sval) return 0; break; case BPF_JLT: @@ -4070,9 +4074,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) return 0; break; case BPF_JSLT: - if (reg->smax_value < (s64)val) + if (reg->smax_value < sval) return 1; - else if (reg->smin_value >= (s64)val) + else if (reg->smin_value >= sval) return 0; break; case BPF_JGE: @@ -4082,9 +4086,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) return 0; break; case BPF_JSGE: - if (reg->smin_value >= (s64)val) + if (reg->smin_value >= sval) return 1; - else if (reg->smax_value < (s64)val) + else if (reg->smax_value < sval) return 0; break; case BPF_JLE: @@ -4094,9 +4098,9 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) return 0; break; case BPF_JSLE: - if (reg->smax_value <= (s64)val) + if (reg->smax_value <= sval) return 1; - else if (reg->smin_value >
(s64)val) + else if (reg->smin_value > sval) return 0; break; } @@ -4113,6 +4117,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { + s64 sval; + /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. @@ -4122,19 +4128,22 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, if (__is_pointer_value(false, false_reg)) return; + sval = (s64)val; + switch (opcode) { case BPF_JEQ: - /* If this is false then we know nothing Jon Snow, but if it is - * true then we know for sure. - */ - __mark_reg_known(true_reg, val); - break; case BPF_JNE: - /* If this is true we know nothing Jon Snow, but if it is false - * we know the value for sure; + { + struct bpf_reg_state *reg = + opcode == BPF_JEQ ? true_reg : false_reg; + + /* For BPF_JEQ, if this is false we know nothing Jon Snow, but + * if it is true we know the value for sure. Likewise for + * BPF_JNE. */ - __mark_reg_known(false_reg, val); + __mark_reg_known(reg, val); break; + } case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); @@ -4142,38 +4151,46 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; - case BPF_JGT: - false_reg->umax_value = min(false_reg->umax_value, val); - true_reg->umin_value = max(true_reg->umin_value, val + 1); - break; - case BPF_JSGT: - false_reg->smax_value = min_t(s64, false_reg->smax_value, val); - true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); - break; - case BPF_JLT: - false_reg->umin_value = max(false_reg->umin_value, val); - true_reg->umax_value = min(true_reg->umax_value, val - 1); - break; - case BPF_JSLT: - false_reg->smin_value = max_t(s64, false_reg->smin_value, val); - true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); - break; case BPF_JGE: - false_reg->umax_value = min(false_reg->umax_value, val - 1); - true_reg->umin_value = max(true_reg->umin_value, val); + case BPF_JGT: + { + u64 false_umax = opcode == BPF_JGT ? val : val - 1; + u64 true_umin = opcode == BPF_JGT ? val + 1 : val; + + false_reg->umax_value = min(false_reg->umax_value, false_umax); + true_reg->umin_value = max(true_reg->umin_value, true_umin); break; + } case BPF_JSGE: - false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); - true_reg->smin_value = max_t(s64, true_reg->smin_value, val); + case BPF_JSGT: + { + s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; + s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; + + false_reg->smax_value = min(false_reg->smax_value, false_smax); + true_reg->smin_value = max(true_reg->smin_value, true_smin); break; + } case BPF_JLE: - false_reg->umin_value = max(false_reg->umin_value, val + 1); - true_reg->umax_value = min(true_reg->umax_value, val); + case BPF_JLT: + { + u64 false_umin = opcode == BPF_JLT ? val : val + 1; + u64 true_umax = opcode == BPF_JLT ? val - 1 : val; + + false_reg->umin_value = max(false_reg->umin_value, false_umin); + true_reg->umax_value = min(true_reg->umax_value, true_umax); break; + } case BPF_JSLE: - false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); - true_reg->smax_value = min_t(s64, true_reg->smax_value, val); + case BPF_JSLT: + { + s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; + s64 true_smax = opcode == BPF_JSLT ? 
sval - 1 : sval; + + false_reg->smin_value = max(false_reg->smin_value, false_smin); + true_reg->smax_value = min(true_reg->smax_value, true_smax); break; + } default: break; } @@ -4198,22 +4215,23 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { + s64 sval; + if (__is_pointer_value(false, false_reg)) return; + sval = (s64)val; + switch (opcode) { case BPF_JEQ: - /* If this is false then we know nothing Jon Snow, but if it is - * true then we know for sure. - */ - __mark_reg_known(true_reg, val); - break; case BPF_JNE: - /* If this is true we know nothing Jon Snow, but if it is false - * we know the value for sure; - */ - __mark_reg_known(false_reg, val); + { + struct bpf_reg_state *reg = + opcode == BPF_JEQ ? true_reg : false_reg; + + __mark_reg_known(reg, val); break; + } case BPF_JSET: false_reg->var_off = tnum_and(false_reg->var_off, tnum_const(~val)); @@ -4221,38 +4239,46 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, true_reg->var_off = tnum_or(true_reg->var_off, tnum_const(val)); break; - case BPF_JGT: - true_reg->umax_value = min(true_reg->umax_value, val - 1); - false_reg->umin_value = max(false_reg->umin_value, val); - break; - case BPF_JSGT: - true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); - false_reg->smin_value = max_t(s64, false_reg->smin_value, val); - break; - case BPF_JLT: - true_reg->umin_value = max(true_reg->umin_value, val + 1); - false_reg->umax_value = min(false_reg->umax_value, val); - break; - case BPF_JSLT: - true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); - false_reg->smax_value = min_t(s64, false_reg->smax_value, val); - break; case BPF_JGE: - true_reg->umax_value = min(true_reg->umax_value, val); - false_reg->umin_value = max(false_reg->umin_value, val + 1); + case BPF_JGT: + { + u64 false_umin = opcode == BPF_JGT ? val : val + 1; + u64 true_umax = opcode == BPF_JGT ? val - 1 : val; + + false_reg->umin_value = max(false_reg->umin_value, false_umin); + true_reg->umax_value = min(true_reg->umax_value, true_umax); break; + } case BPF_JSGE: - true_reg->smax_value = min_t(s64, true_reg->smax_value, val); - false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); + case BPF_JSGT: + { + s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1; + s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval; + + false_reg->smin_value = max(false_reg->smin_value, false_smin); + true_reg->smax_value = min(true_reg->smax_value, true_smax); break; + } case BPF_JLE: - true_reg->umin_value = max(true_reg->umin_value, val); - false_reg->umax_value = min(false_reg->umax_value, val - 1); + case BPF_JLT: + { + u64 false_umax = opcode == BPF_JLT ? val : val - 1; + u64 true_umin = opcode == BPF_JLT ? val + 1 : val; + + false_reg->umax_value = min(false_reg->umax_value, false_umax); + true_reg->umin_value = max(true_reg->umin_value, true_umin); break; + } case BPF_JSLE: - true_reg->smin_value = max_t(s64, true_reg->smin_value, val); - false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); + case BPF_JSLT: + { + s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1; + s64 true_smin = opcode == BPF_JSLT ? 
sval + 1 : sval; + + false_reg->smax_value = min(false_reg->smax_value, false_smax); + true_reg->smin_value = max(true_reg->smin_value, true_smin); break; + } default: break; } } From 092ed0968bb648cd18e8a0430cd0a8a71727315c Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:01 -0500 Subject: [PATCH 47/68] bpf: verifier support JMP32 This patch teaches the verifier about the new BPF_JMP32 instruction class. The verifier needs to treat it similarly to the existing BPF_JMP class: a BPF_JMP32 insn needs to go through all the checks that have been done on BPF_JMP. Also, the verifier performs runtime optimizations based on the extra information conditional jump instructions can offer, especially when the comparison is between a constant and a register, in which case the value range of the register can be improved based on the comparison result. This code is updated accordingly. Acked-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/core.c | 3 +- kernel/bpf/verifier.c | 203 +++++++++++++++++++++++++++++++++++------- 2 files changed, 173 insertions(+), 33 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 2a81b8af3748..1e443ba97310 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -362,7 +362,8 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, insn = prog->insnsi + end_old; } code = insn->code; - if (BPF_CLASS(code) != BPF_JMP || + if ((BPF_CLASS(code) != BPF_JMP && + BPF_CLASS(code) != BPF_JMP32) || BPF_OP(code) == BPF_EXIT) continue; /* Adjust offset of jmps if we cross patch boundaries. */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index eae6cb1fe653..8c1c21cd50b4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1095,7 +1095,7 @@ static int check_subprogs(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; - if (BPF_CLASS(code) != BPF_JMP) + if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; @@ -4031,14 +4031,49 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, * 0 - branch will not be taken and fall-through to next insn * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10] */ -static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) +static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, + bool is_jmp32) { + struct bpf_reg_state reg_lo; s64 sval; if (__is_pointer_value(false, reg)) return -1; - sval = (s64)val; + if (is_jmp32) { + reg_lo = *reg; + reg = &reg_lo; + /* For JMP32, only low 32 bits are compared, coerce_reg_to_size + * could truncate high bits and update umin/umax according to + * information of low bits. + */ + coerce_reg_to_size(reg, 4); + /* smin/smax need special handling. For example, after coerce, + * if smin_value is 0x00000000ffffffffLL, the value is -1 when + * used as operand to JMP32. It is a negative number from s32's + * point of view, while it is a positive number when seen as + * s64. The smin/smax are kept as s64, therefore, when used with + * JMP32, they need to be transformed into s32, then sign + * extended back to s64. + * + * Also, smin/smax were copied from umin/umax. If umin/umax has + * different sign bit, then min/max relationship doesn't + * maintain after casting into s32, for this case, set smin/smax + * to safest range.
+ */ + if ((reg->umax_value ^ reg->umin_value) & + (1ULL << 31)) { + reg->smin_value = S32_MIN; + reg->smax_value = S32_MAX; + } + reg->smin_value = (s64)(s32)reg->smin_value; + reg->smax_value = (s64)(s32)reg->smax_value; + + val = (u32)val; + sval = (s64)(s32)val; + } else { + sval = (s64)val; + } switch (opcode) { case BPF_JEQ: @@ -4108,6 +4143,29 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) return -1; } +/* Generate min value of the high 32-bit from TNUM info. */ +static u64 gen_hi_min(struct tnum var) +{ + return var.value & ~0xffffffffULL; +} + +/* Generate max value of the high 32-bit from TNUM info. */ +static u64 gen_hi_max(struct tnum var) +{ + return (var.value | var.mask) & ~0xffffffffULL; +} + +/* Return true if VAL is compared with a s64 sign extended from s32, and they + * are with the same signedness. + */ +static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg) +{ + return ((s32)sval >= 0 && + reg->smin_value >= 0 && reg->smax_value <= S32_MAX) || + ((s32)sval < 0 && + reg->smax_value <= 0 && reg->smin_value >= S32_MIN); +} + /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. @@ -4115,7 +4173,7 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, - u8 opcode) + u8 opcode, bool is_jmp32) { s64 sval; @@ -4128,7 +4186,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, if (__is_pointer_value(false, false_reg)) return; - sval = (s64)val; + val = is_jmp32 ? (u32)val : val; + sval = is_jmp32 ? (s64)(s32)val : (s64)val; switch (opcode) { case BPF_JEQ: @@ -4141,7 +4200,15 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, * if it is true we know the value for sure. Likewise for * BPF_JNE. */ - __mark_reg_known(reg, val); + if (is_jmp32) { + u64 old_v = reg->var_off.value; + u64 hi_mask = ~0xffffffffULL; + + reg->var_off.value = (old_v & hi_mask) | val; + reg->var_off.mask &= hi_mask; + } else { + __mark_reg_known(reg, val); + } break; } case BPF_JSET: @@ -4157,6 +4224,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, u64 false_umax = opcode == BPF_JGT ? val : val - 1; u64 true_umin = opcode == BPF_JGT ? val + 1 : val; + if (is_jmp32) { + false_umax += gen_hi_max(false_reg->var_off); + true_umin += gen_hi_min(true_reg->var_off); + } false_reg->umax_value = min(false_reg->umax_value, false_umax); true_reg->umin_value = max(true_reg->umin_value, true_umin); break; @@ -4167,6 +4238,11 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; + /* If the full s64 was not sign-extended from s32 then don't + * deduct further info. + */ + if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) + break; false_reg->smax_value = min(false_reg->smax_value, false_smax); true_reg->smin_value = max(true_reg->smin_value, true_smin); break; @@ -4177,6 +4253,10 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, u64 false_umin = opcode == BPF_JLT ? val : val + 1; u64 true_umax = opcode == BPF_JLT ? 
val - 1 : val; + if (is_jmp32) { + false_umin += gen_hi_min(false_reg->var_off); + true_umax += gen_hi_max(true_reg->var_off); + } false_reg->umin_value = max(false_reg->umin_value, false_umin); true_reg->umax_value = min(true_reg->umax_value, true_umax); break; @@ -4187,6 +4267,8 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; + if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) + break; false_reg->smin_value = max(false_reg->smin_value, false_smin); true_reg->smax_value = min(true_reg->smax_value, true_smax); break; @@ -4213,14 +4295,15 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, - u8 opcode) + u8 opcode, bool is_jmp32) { s64 sval; if (__is_pointer_value(false, false_reg)) return; - sval = (s64)val; + val = is_jmp32 ? (u32)val : val; + sval = is_jmp32 ? (s64)(s32)val : (s64)val; switch (opcode) { case BPF_JEQ: @@ -4229,7 +4312,15 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *reg = opcode == BPF_JEQ ? true_reg : false_reg; - __mark_reg_known(reg, val); + if (is_jmp32) { + u64 old_v = reg->var_off.value; + u64 hi_mask = ~0xffffffffULL; + + reg->var_off.value = (old_v & hi_mask) | val; + reg->var_off.mask &= hi_mask; + } else { + __mark_reg_known(reg, val); + } break; } case BPF_JSET: @@ -4245,6 +4336,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, u64 false_umin = opcode == BPF_JGT ? val : val + 1; u64 true_umax = opcode == BPF_JGT ? val - 1 : val; + if (is_jmp32) { + false_umin += gen_hi_min(false_reg->var_off); + true_umax += gen_hi_max(true_reg->var_off); + } false_reg->umin_value = max(false_reg->umin_value, false_umin); true_reg->umax_value = min(true_reg->umax_value, true_umax); break; @@ -4255,6 +4350,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1; s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval; + if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) + break; false_reg->smin_value = max(false_reg->smin_value, false_smin); true_reg->smax_value = min(true_reg->smax_value, true_smax); break; @@ -4265,6 +4362,10 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, u64 false_umax = opcode == BPF_JLT ? val : val - 1; u64 true_umin = opcode == BPF_JLT ? val + 1 : val; + if (is_jmp32) { + false_umax += gen_hi_max(false_reg->var_off); + true_umin += gen_hi_min(true_reg->var_off); + } false_reg->umax_value = min(false_reg->umax_value, false_umax); true_reg->umin_value = max(true_reg->umin_value, true_umin); break; @@ -4275,6 +4376,8 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1; s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval; + if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) + break; false_reg->smax_value = min(false_reg->smax_value, false_smax); true_reg->smin_value = max(true_reg->smin_value, true_smin); break; @@ -4416,6 +4519,10 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, if (BPF_SRC(insn->code) != BPF_X) return false; + /* Pointers are always 64-bit. 
*/ + if (BPF_CLASS(insn->code) == BPF_JMP32) + return false; + switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && @@ -4508,16 +4615,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); + bool is_jmp32; int err; - if (opcode > BPF_JSLE) { - verbose(env, "invalid BPF_JMP opcode %x\n", opcode); + /* Only conditional jumps are expected to reach here. */ + if (opcode == BPF_JA || opcode > BPF_JSLE) { + verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { - verbose(env, "BPF_JMP uses reserved fields\n"); + verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); return -EINVAL; } @@ -4533,7 +4642,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } } else { if (insn->src_reg != BPF_REG_0) { - verbose(env, "BPF_JMP uses reserved fields\n"); + verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); return -EINVAL; } } @@ -4544,9 +4653,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, return err; dst_reg = &regs[insn->dst_reg]; + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; if (BPF_SRC(insn->code) == BPF_K) { - int pred = is_branch_taken(dst_reg, insn->imm, opcode); + int pred = is_branch_taken(dst_reg, insn->imm, opcode, + is_jmp32); if (pred == 1) { /* only follow the goto, ignore fall-through */ @@ -4574,30 +4685,51 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, * comparable. */ if (BPF_SRC(insn->code) == BPF_X) { + struct bpf_reg_state *src_reg = &regs[insn->src_reg]; + struct bpf_reg_state lo_reg0 = *dst_reg; + struct bpf_reg_state lo_reg1 = *src_reg; + struct bpf_reg_state *src_lo, *dst_lo; + + dst_lo = &lo_reg0; + src_lo = &lo_reg1; + coerce_reg_to_size(dst_lo, 4); + coerce_reg_to_size(src_lo, 4); + if (dst_reg->type == SCALAR_VALUE && - regs[insn->src_reg].type == SCALAR_VALUE) { - if (tnum_is_const(regs[insn->src_reg].var_off)) + src_reg->type == SCALAR_VALUE) { + if (tnum_is_const(src_reg->var_off) || + (is_jmp32 && tnum_is_const(src_lo->var_off))) reg_set_min_max(&other_branch_regs[insn->dst_reg], - dst_reg, regs[insn->src_reg].var_off.value, - opcode); + dst_reg, + is_jmp32 + ? src_lo->var_off.value + : src_reg->var_off.value, + opcode, is_jmp32); else if (tnum_is_const(dst_reg->var_off) || + (is_jmp32 && tnum_is_const(dst_lo->var_off))) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], - &regs[insn->src_reg], - dst_reg->var_off.value, opcode); + src_reg, + is_jmp32 + ? dst_lo->var_off.value + : dst_reg->var_off.value, + opcode, is_jmp32); + else if (!is_jmp32 && + (opcode == BPF_JEQ || opcode == BPF_JNE)) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], - &regs[insn->src_reg], - &regs[insn->dst_reg], opcode); + src_reg, dst_reg, opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], - dst_reg, insn->imm, opcode); + dst_reg, insn->imm, opcode, is_jmp32); } - /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ - if (BPF_SRC(insn->code) == BPF_K && + /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). + * NOTE: these optimizations below are related with pointer comparison + * which will never be JMP32.
*/ + if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && reg_type_may_be_null(dst_reg->type)) { /* Mark all identical registers in each branch as either @@ -4926,7 +5058,8 @@ peek_stack: goto check_state; t = insn_stack[cur_stack - 1]; - if (BPF_CLASS(insns[t].code) == BPF_JMP) { + if (BPF_CLASS(insns[t].code) == BPF_JMP || + BPF_CLASS(insns[t].code) == BPF_JMP32) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { @@ -6082,7 +6215,7 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - } else if (class == BPF_JMP) { + } else if (class == BPF_JMP || class == BPF_JMP32) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { @@ -6090,7 +6223,8 @@ static int do_check(struct bpf_verifier_env *env) insn->off != 0 || (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL) || - insn->dst_reg != BPF_REG_0) { + insn->dst_reg != BPF_REG_0 || + class == BPF_JMP32) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } @@ -6106,7 +6240,8 @@ static int do_check(struct bpf_verifier_env *env) if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || - insn->dst_reg != BPF_REG_0) { + insn->dst_reg != BPF_REG_0 || + class == BPF_JMP32) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } @@ -6118,7 +6253,8 @@ static int do_check(struct bpf_verifier_env *env) if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || - insn->dst_reg != BPF_REG_0) { + insn->dst_reg != BPF_REG_0 || + class == BPF_JMP32) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } @@ -6635,6 +6771,9 @@ static bool insn_is_cond_jump(u8 code) { u8 op; + if (BPF_CLASS(code) == BPF_JMP32) + return true; + if (BPF_CLASS(code) != BPF_JMP) return false; From 56cbd82ef0b3dc47a16beeebc8d9a9a9269093dc Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:02 -0500 Subject: [PATCH 48/68] bpf: disassembler support JMP32 This patch teaches the disassembler about JMP32. There are two places to update: - Class 0x6 is now used by BPF_JMP32, not "unused". - BPF_JMP32 needs to show comparison operands properly. One disassembly format would be to add an extra "(32)" before the operands when a sub-register is used. A better format, for both JMP32 and ALU32, simply shows the register prefix as "w" instead of "r"; this is the format used by the LLVM assembler. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/disasm.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index d6b76377cb6e..de73f55e42fd 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -67,7 +67,7 @@ const char *const bpf_class_string[8] = { [BPF_STX] = "stx", [BPF_ALU] = "alu", [BPF_JMP] = "jmp", - [BPF_RET] = "BUG", + [BPF_JMP32] = "jmp32", [BPF_ALU64] = "alu64", }; @@ -136,23 +136,22 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, else print_bpf_end_insn(verbose, cbs->private_data, insn); } else if (BPF_OP(insn->code) == BPF_NEG) { - verbose(cbs->private_data, "(%02x) r%d = %s-r%d\n", - insn->code, insn->dst_reg, - class == BPF_ALU ? "(u32) " : "", + verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n", + insn->code, class == BPF_ALU ? 'w' : 'r', + insn->dst_reg, class == BPF_ALU ?
'w' : 'r', insn->dst_reg); } else if (BPF_SRC(insn->code) == BPF_X) { - verbose(cbs->private_data, "(%02x) %sr%d %s %sr%d\n", - insn->code, class == BPF_ALU ? "(u32) " : "", + verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n", + insn->code, class == BPF_ALU ? 'w' : 'r', insn->dst_reg, bpf_alu_string[BPF_OP(insn->code) >> 4], - class == BPF_ALU ? "(u32) " : "", + class == BPF_ALU ? 'w' : 'r', insn->src_reg); } else { - verbose(cbs->private_data, "(%02x) %sr%d %s %s%d\n", - insn->code, class == BPF_ALU ? "(u32) " : "", + verbose(cbs->private_data, "(%02x) %c%d %s %d\n", - insn->code, class == BPF_ALU ? 'w' : 'r', insn->dst_reg, bpf_alu_string[BPF_OP(insn->code) >> 4], - class == BPF_ALU ? "(u32) " : "", insn->imm); } } else if (class == BPF_STX) { @@ -220,7 +219,7 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code); return; } - } else if (class == BPF_JMP) { + } else if (class == BPF_JMP32 || class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { @@ -244,13 +243,18 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, } else if (insn->code == (BPF_JMP | BPF_EXIT)) { verbose(cbs->private_data, "(%02x) exit\n", insn->code); } else if (BPF_SRC(insn->code) == BPF_X) { - verbose(cbs->private_data, "(%02x) if r%d %s r%d goto pc%+d\n", - insn->code, insn->dst_reg, + verbose(cbs->private_data, + "(%02x) if %c%d %s %c%d goto pc%+d\n", + insn->code, class == BPF_JMP32 ? 'w' : 'r', + insn->dst_reg, bpf_jmp_string[BPF_OP(insn->code) >> 4], + class == BPF_JMP32 ? 'w' : 'r', insn->src_reg, insn->off); } else { - verbose(cbs->private_data, "(%02x) if r%d %s 0x%x goto pc%+d\n", - insn->code, insn->dst_reg, + verbose(cbs->private_data, + "(%02x) if %c%d %s 0x%x goto pc%+d\n", + insn->code, class == BPF_JMP32 ? 'w' : 'r', + insn->dst_reg, bpf_jmp_string[BPF_OP(insn->code) >> 4], insn->imm, insn->off); } From df791dc167603efb676447a2d7b769a19e8fd722 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:03 -0500 Subject: [PATCH 49/68] tools: bpftool: teach cfg code about JMP32 The cfg code needs to be aware of the new JMP32 instruction class so that it can partition functions correctly.
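The mechanical change is a small predicate; a condensed sketch of how the partitioning code below uses it:

    static bool is_jmp_insn(u8 code)
    {
            return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
    }

    /* basic block partitioning: any conditional jump, JMP or JMP32,
     * ends the current block and adds both a jump edge and a
     * fall-through edge.
     */
    if (is_jmp_insn(cur->code)) {
            u8 opcode = BPF_OP(cur->code);
            ...
    }

Without this, a JMP32 branch would be treated as straight-line code and the generated control flow graph would miss block boundaries.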
Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/cfg.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c index 31f0db41513f..3e21f994f262 100644 --- a/tools/bpf/bpftool/cfg.c +++ b/tools/bpf/bpftool/cfg.c @@ -157,6 +157,11 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur, return false; } +static bool is_jmp_insn(u8 code) +{ + return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32; +} + static bool func_partition_bb_head(struct func_node *func) { struct bpf_insn *cur, *end; @@ -170,7 +175,7 @@ static bool func_partition_bb_head(struct func_node *func) return true; for (; cur <= end; cur++) { - if (BPF_CLASS(cur->code) == BPF_JMP) { + if (is_jmp_insn(cur->code)) { u8 opcode = BPF_OP(cur->code); if (opcode == BPF_EXIT || opcode == BPF_CALL) @@ -296,7 +301,7 @@ static bool func_add_bb_edges(struct func_node *func) e->src = bb; insn = bb->tail; - if (BPF_CLASS(insn->code) != BPF_JMP || + if (!is_jmp_insn(insn->code) || BPF_OP(insn->code) == BPF_EXIT) { e->dst = bb_next(bb); e->flags |= EDGE_FLAG_FALLTHROUGH; From 503a8865a47752d0ac2ff642f07e96e8b2103178 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:04 -0500 Subject: [PATCH 50/68] bpf: interpreter support for JMP32 This patch implements interpretation of the new JMP32 instructions. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- kernel/bpf/core.c | 197 +++++++++++++++------------------------------- 1 file changed, 63 insertions(+), 134 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1e443ba97310..bba11c2565ee 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1145,6 +1145,31 @@ EXPORT_SYMBOL_GPL(__bpf_call_base); INSN_2(JMP, CALL), \ /* Exit instruction. */ \ INSN_2(JMP, EXIT), \ + /* 32-bit Jump instructions. */ \ + /* Register based. */ \ + INSN_3(JMP32, JEQ, X), \ + INSN_3(JMP32, JNE, X), \ + INSN_3(JMP32, JGT, X), \ + INSN_3(JMP32, JLT, X), \ + INSN_3(JMP32, JGE, X), \ + INSN_3(JMP32, JLE, X), \ + INSN_3(JMP32, JSGT, X), \ + INSN_3(JMP32, JSLT, X), \ + INSN_3(JMP32, JSGE, X), \ + INSN_3(JMP32, JSLE, X), \ + INSN_3(JMP32, JSET, X), \ + /* Immediate based. */ \ + INSN_3(JMP32, JEQ, K), \ + INSN_3(JMP32, JNE, K), \ + INSN_3(JMP32, JGT, K), \ + INSN_3(JMP32, JLT, K), \ + INSN_3(JMP32, JGE, K), \ + INSN_3(JMP32, JLE, K), \ + INSN_3(JMP32, JSGT, K), \ + INSN_3(JMP32, JSLT, K), \ + INSN_3(JMP32, JSGE, K), \ + INSN_3(JMP32, JSLE, K), \ + INSN_3(JMP32, JSET, K), \ /* Jump instructions. */ \ /* Register based.
*/ \ INSN_3(JMP, JEQ, X), \ @@ -1405,145 +1430,49 @@ select_insn: out: CONT; } - /* JMP */ JMP_JA: insn += insn->off; CONT; - JMP_JEQ_X: - if (DST == SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JEQ_K: - if (DST == IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JNE_X: - if (DST != SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JNE_K: - if (DST != IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JGT_X: - if (DST > SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JGT_K: - if (DST > IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JLT_X: - if (DST < SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JLT_K: - if (DST < IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JGE_X: - if (DST >= SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JGE_K: - if (DST >= IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JLE_X: - if (DST <= SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JLE_K: - if (DST <= IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSGT_X: - if (((s64) DST) > ((s64) SRC)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSGT_K: - if (((s64) DST) > ((s64) IMM)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSLT_X: - if (((s64) DST) < ((s64) SRC)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSLT_K: - if (((s64) DST) < ((s64) IMM)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSGE_X: - if (((s64) DST) >= ((s64) SRC)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSGE_K: - if (((s64) DST) >= ((s64) IMM)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSLE_X: - if (((s64) DST) <= ((s64) SRC)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSLE_K: - if (((s64) DST) <= ((s64) IMM)) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSET_X: - if (DST & SRC) { - insn += insn->off; - CONT_JMP; - } - CONT; - JMP_JSET_K: - if (DST & IMM) { - insn += insn->off; - CONT_JMP; - } - CONT; JMP_EXIT: return BPF_R0; - + /* JMP */ +#define COND_JMP(SIGN, OPCODE, CMP_OP) \ + JMP_##OPCODE##_X: \ + if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ + insn += insn->off; \ + CONT_JMP; \ + } \ + CONT; \ + JMP32_##OPCODE##_X: \ + if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ + insn += insn->off; \ + CONT_JMP; \ + } \ + CONT; \ + JMP_##OPCODE##_K: \ + if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ + insn += insn->off; \ + CONT_JMP; \ + } \ + CONT; \ + JMP32_##OPCODE##_K: \ + if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ + insn += insn->off; \ + CONT_JMP; \ + } \ + CONT; + COND_JMP(u, JEQ, ==) + COND_JMP(u, JNE, !=) + COND_JMP(u, JGT, >) + COND_JMP(u, JLT, <) + COND_JMP(u, JGE, >=) + COND_JMP(u, JLE, <=) + COND_JMP(u, JSET, &) + COND_JMP(s, JSGT, >) + COND_JMP(s, JSLT, <) + COND_JMP(s, JSGE, >=) + COND_JMP(s, JSLE, <=) +#undef COND_JMP /* STX and ST and LDX*/ #define LDST(SIZEOP, SIZE) \ STX_MEM_##SIZEOP: \ From a7b76c8857692b0fce063b94ed83da11c396d341 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:05 -0500 Subject: [PATCH 51/68] bpf: JIT blinds support JMP32 This patch adds JIT blinding support for JMP32. Like BPF_JMP_REG/IMM, JMP32 versions are needed for building raw bpf insns. They are added to both include/linux/filter.h and tools/include/linux/filter.h.
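The rewrite itself follows the existing 64-bit pattern. Condensed from the core.c hunk below, a JMP32 compare against an immediate is blinded into a constant load, an unblinding xor, and a register-based compare:

    /* before:  if wD op imm goto pc+off
     * after (imm_rnd is the per-program random value):
     */
    *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
    *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
    *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, off);

so the original constant never appears verbatim in the JITed image, and a backward jump offset is adjusted by the two extra instructions.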
Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 20 ++++++++++++++++++++ kernel/bpf/core.c | 21 +++++++++++++++++++++ tools/include/linux/filter.h | 20 ++++++++++++++++++++ 3 files changed, 61 insertions(+) diff --git a/include/linux/filter.h b/include/linux/filter.h index be9af6b4a9e4..e4b473f85b46 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -277,6 +277,26 @@ struct sock_reuseport; .off = OFF, \ .imm = IMM }) +/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + /* Unconditional jumps, goto pc + off16 */ #define BPF_JMP_A(OFF) \ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index bba11c2565ee..a7bcb23bee84 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -949,6 +949,27 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); break; + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + /* Accommodate for extra offset in case of a backjump. */ + off = from->off; + if (off < 0) + off -= 2; + *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); + *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); + *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, + off); + break; + case BPF_LD | BPF_IMM | BPF_DW: *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h index af55acf73e75..cce0b02c0e28 100644 --- a/tools/include/linux/filter.h +++ b/tools/include/linux/filter.h @@ -199,6 +199,16 @@ .off = OFF, \ .imm = 0 }) +/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ @@ -209,6 +219,16 @@ .off = OFF, \ .imm = IMM }) +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + /* Unconditional jumps, goto pc + off16 */ #define BPF_JMP_A(OFF) \ From 3f5d6525f21990e6b0d5c6b31a6e95c9cdffa831 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:06 -0500 Subject: [PATCH 52/68] x86_64: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on x86_64. 
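On x86_64 the 64-bit and 32-bit compares share the same opcode; only the REX prefix differs. A condensed sketch of the emission logic used in the hunks below:

    /* cmp dst_reg, src_reg */
    if (BPF_CLASS(insn->code) == BPF_JMP)
            EMIT1(add_2mod(0x48, dst_reg, src_reg)); /* REX.W: 64-bit cmp */
    else if (is_ereg(dst_reg) || is_ereg(src_reg))
            EMIT1(add_2mod(0x40, dst_reg, src_reg)); /* plain REX for r8..r15 */
    EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));

For JMP32 on the low eight registers no prefix is needed at all, so the 32-bit form can even be a byte shorter than its 64-bit counterpart.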
Cc: Alexei Starovoitov Cc: Daniel Borkmann Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 46 ++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5542303c43d9..afabf597c855 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -881,20 +881,41 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: /* test dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* test dst_reg, imm32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; @@ -908,8 +929,21 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: /* cmp dst_reg, imm8/32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); if (is_imm8(imm32)) EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); From 69f827eb6e14bdc9c7894fba3a00ca93aaa98a73 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:07 -0500 Subject: [PATCH 53/68] x32: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on x32. It also fixes several reverse xmas tree coding style issues along the way.
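On a 32-bit host every 64-bit BPF register is modelled as a pair of 32-bit host registers, so the saving for JMP32 is structural: the high halves need neither be loaded nor compared. A condensed sketch of the pattern used throughout the hunks below:

    bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;

    if (is_jmp64) {
            /* cmp dreg_hi,sreg_hi; if the high words already differ,
             * skip the low compare and branch on these flags
             */
            EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
            EMIT2(IA32_JNE, 2);
    }
    /* cmp dreg_lo,sreg_lo */
    EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));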
Cc: Wang YanQing Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp32.c | 121 ++++++++++++++++++++++++---------- 1 file changed, 85 insertions(+), 36 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 8f6cc71e0848..0d9cdffce6ac 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2072,7 +2072,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: - case BPF_JMP | BPF_JSGE | BPF_X: { + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2081,25 +2092,35 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_X: { + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2108,15 +2129,21 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); @@ -2126,32 +2153,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? 
IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? (u32)~0 : 0; /* mov ecx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32); /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi); + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JEQ | BPF_K: @@ -2163,29 +2197,44 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: - case BPF_JMP | BPF_JSGE | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? (u32)~0 : 0; /* mov ecx,imm32 */ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); - - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); From 654b65a04880454efb7d3287f011f2f790963b32 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:08 -0500 Subject: [PATCH 54/68] arm64: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on arm64. 
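arm64 encodes the operand width in the sf bit of its data-processing
instructions, so the patch widens the existing is64 flag to cover BPF_JMP
and feeds it straight into A64_CMP()/A64_TST(). As a sketch of the compare
encoding this selects (a64_cmp_sketch() is an illustrative name; cmp is
SUBS with Rd = zr):

    static u32 a64_cmp_sketch(bool is64, u32 rn, u32 rm)
    {
        return (u32)is64 << 31 |    /* sf: 0 = Wn, 1 = Xn */
               0x6b << 24 |         /* SUBS (shifted register) */
               rm << 16 |
               rn << 5 |
               31;                  /* Rd = zr turns subs into cmp */
    }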
Cc: Daniel Borkmann
Cc: Alexei Starovoitov
Cc: Zi Shen Lim
Signed-off-by: Jiong Wang
Signed-off-by: Alexei Starovoitov
---
 arch/arm64/net/bpf_jit_comp.c | 37 ++++++++++++++++++++++++++++-------
 1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 1542df00b23c..aaddc0217e73 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -362,7 +362,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
 	const int i = insn - ctx->prog->insnsi;
-	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
+	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
+			  BPF_CLASS(code) == BPF_JMP;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
 	u8 jmp_cond;
 	s32 jmp_offset;
@@ -559,7 +560,17 @@ emit_bswap_uxt:
 	case BPF_JMP | BPF_JSLT | BPF_X:
 	case BPF_JMP | BPF_JSGE | BPF_X:
 	case BPF_JMP | BPF_JSLE | BPF_X:
-		emit(A64_CMP(1, dst, src), ctx);
+	case BPF_JMP32 | BPF_JEQ | BPF_X:
+	case BPF_JMP32 | BPF_JGT | BPF_X:
+	case BPF_JMP32 | BPF_JLT | BPF_X:
+	case BPF_JMP32 | BPF_JGE | BPF_X:
+	case BPF_JMP32 | BPF_JLE | BPF_X:
+	case BPF_JMP32 | BPF_JNE | BPF_X:
+	case BPF_JMP32 | BPF_JSGT | BPF_X:
+	case BPF_JMP32 | BPF_JSLT | BPF_X:
+	case BPF_JMP32 | BPF_JSGE | BPF_X:
+	case BPF_JMP32 | BPF_JSLE | BPF_X:
+		emit(A64_CMP(is64, dst, src), ctx);
 emit_cond_jmp:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
 		check_imm19(jmp_offset);
@@ -601,7 +612,8 @@ emit_cond_jmp:
 		emit(A64_B_(jmp_cond, jmp_offset), ctx);
 		break;
 	case BPF_JMP | BPF_JSET | BPF_X:
-		emit(A64_TST(1, dst, src), ctx);
+	case BPF_JMP32 | BPF_JSET | BPF_X:
+		emit(A64_TST(is64, dst, src), ctx);
 		goto emit_cond_jmp;
 	/* IF (dst COND imm) JUMP off */
 	case BPF_JMP | BPF_JEQ | BPF_K:
@@ -614,12 +626,23 @@ emit_cond_jmp:
 	case BPF_JMP | BPF_JSLT | BPF_K:
 	case BPF_JMP | BPF_JSGE | BPF_K:
 	case BPF_JMP | BPF_JSLE | BPF_K:
-		emit_a64_mov_i(1, tmp, imm, ctx);
-		emit(A64_CMP(1, dst, tmp), ctx);
+	case BPF_JMP32 | BPF_JEQ | BPF_K:
+	case BPF_JMP32 | BPF_JGT | BPF_K:
+	case BPF_JMP32 | BPF_JLT | BPF_K:
+	case BPF_JMP32 | BPF_JGE | BPF_K:
+	case BPF_JMP32 | BPF_JLE | BPF_K:
+	case BPF_JMP32 | BPF_JNE | BPF_K:
+	case BPF_JMP32 | BPF_JSGT | BPF_K:
+	case BPF_JMP32 | BPF_JSLT | BPF_K:
+	case BPF_JMP32 | BPF_JSGE | BPF_K:
+	case BPF_JMP32 | BPF_JSLE | BPF_K:
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_CMP(is64, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	case BPF_JMP | BPF_JSET | BPF_K:
-		emit_a64_mov_i(1, tmp, imm, ctx);
-		emit(A64_TST(1, dst, tmp), ctx);
+	case BPF_JMP32 | BPF_JSET | BPF_K:
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_TST(is64, dst, tmp), ctx);
 		goto emit_cond_jmp;
 	/* function call */
 	case BPF_JMP | BPF_CALL:

From b85062ac0df33a6a1dcaccd07fe7b1ca55194b25 Mon Sep 17 00:00:00 2001
From: Jiong Wang
Date: Sat, 26 Jan 2019 12:26:09 -0500
Subject: [PATCH 55/68] arm: bpf: implement jitting of JMP32

This patch implements code-gen for new JMP32 instructions on arm. For
JSET, "ands" (AND with flags updated) is used, so a corresponding
encoding helper is added.
Cc: Shubham Bansal Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/arm/net/bpf_jit_32.c | 53 ++++++++++++++++++++++++++++++++------- arch/arm/net/bpf_jit_32.h | 2 ++ 2 files changed, 46 insertions(+), 9 deletions(-) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 25b3ee85066e..c8bfbbfdfcc3 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1083,12 +1083,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, /* Arithmatic Operation */ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, - const u8 rn, struct jit_ctx *ctx, u8 op) { + const u8 rn, struct jit_ctx *ctx, u8 op, + bool is_jmp64) { switch (op) { case BPF_JSET: - emit(ARM_AND_R(ARM_IP, rt, rn), ctx); - emit(ARM_AND_R(ARM_LR, rd, rm), ctx); - emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + if (is_jmp64) { + emit(ARM_AND_R(ARM_IP, rt, rn), ctx); + emit(ARM_AND_R(ARM_LR, rd, rm), ctx); + emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + } else { + emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx); + } break; case BPF_JEQ: case BPF_JNE: @@ -1096,18 +1101,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, case BPF_JGE: case BPF_JLE: case BPF_JLT: - emit(ARM_CMP_R(rd, rm), ctx); - _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + if (is_jmp64) { + emit(ARM_CMP_R(rd, rm), ctx); + /* Only compare low halve if high halve are equal. */ + _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + } else { + emit(ARM_CMP_R(rt, rn), ctx); + } break; case BPF_JSLE: case BPF_JSGT: emit(ARM_CMP_R(rn, rt), ctx); - emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); break; case BPF_JSLT: case BPF_JSGE: emit(ARM_CMP_R(rt, rn), ctx); - emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); break; } } @@ -1615,6 +1627,17 @@ exit: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* Setup source registers */ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); @@ -1641,6 +1664,17 @@ exit: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: if (off == 0) break; rm = tmp2[0]; @@ -1652,7 +1686,8 @@ go_jmp: rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Check for the condition */ - emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code)); + emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code), + BPF_CLASS(code) == BPF_JMP); /* Setup JUMP instruction */ jmp_offset = bpf2a32_offset(i+off, i, ctx); diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index f4e58bcdaa43..13a05f759552 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -62,6 +62,7 @@ 
#define ARM_INST_ADDS_I 0x02900000 #define ARM_INST_AND_R 0x00000000 +#define ARM_INST_ANDS_R 0x00100000 #define ARM_INST_AND_I 0x02000000 #define ARM_INST_BIC_R 0x01c00000 @@ -172,6 +173,7 @@ #define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) +#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm) #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) #define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) From 5f6459966d0abca8b799440f6e1b18dac153c54d Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:10 -0500 Subject: [PATCH 56/68] ppc: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on ppc. For JMP32 | JSET, instruction encoding for PPC_RLWINM_DOT is added to check the result of ANDing low 32-bit of operands. Cc: Naveen N. Rao Cc: Sandipan Das Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/powerpc/include/asm/ppc-opcode.h | 1 + arch/powerpc/net/bpf_jit.h | 4 + arch/powerpc/net/bpf_jit_comp64.c | 120 +++++++++++++++++++++++--- 3 files changed, 111 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 19a8834e0398..f9513ad38fa6 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -337,6 +337,7 @@ #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 #define PPC_INST_RLWINM 0x54000000 +#define PPC_INST_RLWINM_DOT 0x54000001 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 #define PPC_INST_RLDICR 0x78000004 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index c2d5192ed64f..549e9490ff2a 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -165,6 +165,10 @@ #define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \ + ___PPC_RA(d) | ___PPC_RS(a) | \ + __PPC_SH(i) | __PPC_MB(mb) | \ + __PPC_ME(me)) #define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 7ce57657d3b8..15bba765fa79 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -768,36 +768,58 @@ emit_clear: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: true_cond = COND_GT; goto cond_branch; case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: true_cond = COND_LT; goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: true_cond = COND_GE; goto cond_branch; case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | 
BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: true_cond = COND_LE; goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: true_cond = COND_EQ; goto cond_branch; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: true_cond = COND_NE; goto cond_branch; case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: true_cond = COND_NE; /* Fall through */ @@ -809,18 +831,44 @@ cond_branch: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: /* unsigned comparison */ - PPC_CMPLD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPLW(dst_reg, src_reg); + else + PPC_CMPLD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* signed comparison */ - PPC_CMPD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPW(dst_reg, src_reg); + else + PPC_CMPD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSET | BPF_X: - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg); + case BPF_JMP32 | BPF_JSET | BPF_X: + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, + src_reg); + } else { + int tmp_reg = b2p[TMP_REG_1]; + + PPC_AND(tmp_reg, dst_reg, src_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, + 31); + } break; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: @@ -828,43 +876,87 @@ cond_branch: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * Need sign-extended load, so only positive * values can be used as imm in cmpldi */ - if (imm >= 0 && imm < 32768) - PPC_CMPLDI(dst_reg, imm); - else { + if (imm >= 0 && imm < 32768) { + if (is_jmp32) + PPC_CMPLWI(dst_reg, imm); + else + PPC_CMPLDI(dst_reg, imm); + } else { /* sign-extending load */ PPC_LI32(b2p[TMP_REG_1], imm); /* ... 
but unsigned comparison */ - PPC_CMPLD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPLW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPLD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * signed comparison, so any 16-bit value * can be used in cmpdi */ - if (imm >= -32768 && imm < 32768) - PPC_CMPDI(dst_reg, imm); - else { + if (imm >= -32768 && imm < 32768) { + if (is_jmp32) + PPC_CMPWI(dst_reg, imm); + else + PPC_CMPDI(dst_reg, imm); + } else { PPC_LI32(b2p[TMP_REG_1], imm); - PPC_CMPD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) /* PPC_ANDI is _only/always_ dot-form */ PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm); else { - PPC_LI32(b2p[TMP_REG_1], imm); - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, - b2p[TMP_REG_1]); + int tmp_reg = b2p[TMP_REG_1]; + + PPC_LI32(tmp_reg, imm); + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(tmp_reg, dst_reg, + tmp_reg); + } else { + PPC_AND(tmp_reg, dst_reg, + tmp_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, + 0, 0, 31); + } } break; } From 626a5f66da0d19227253325e1d0f1b3d7a708342 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:11 -0500 Subject: [PATCH 57/68] s390: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on s390. 
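s390 already offers 32-bit variants of the compare-and-branch
instructions, so most of the work is opcode selection: crj/clrj instead
of cgrj/clgrj. The immediate JSET case additionally wants a
zero-extending load (llilf, RIL opcode 0xc00f, where lgfi, 0xc001,
sign-extends) followed by a 32-bit nr, so that sign bits cannot bleed
into the test. A sketch of the branch-opcode choice, mirroring the hunk
below (cmp_branch_opc() is an illustrative name):

    static unsigned int cmp_branch_opc(bool is_jmp32, bool is_unsigned)
    {
        if (is_unsigned)
            return is_jmp32 ? 0x0077 : 0x0065;  /* clrj : clgrj */
        return is_jmp32 ? 0x0076 : 0x0064;      /* crj  : cgrj  */
    }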
Cc: Martin Schwidefsky Cc: Heiko Carstens Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 66 ++++++++++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 14 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 3ff758eeb71d..ce9defdff62a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1110,103 +1110,141 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i mask = 0xf000; /* j */ goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ + case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */ mask = 0x2000; /* jh */ goto branch_ks; case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ + case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */ mask = 0x4000; /* jl */ goto branch_ks; case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ + case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */ mask = 0xa000; /* jhe */ goto branch_ks; case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ + case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */ mask = 0xc000; /* jle */ goto branch_ks; case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ + case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */ mask = 0x2000; /* jh */ goto branch_ku; case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ + case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */ mask = 0x4000; /* jl */ goto branch_ku; case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ + case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */ mask = 0xa000; /* jhe */ goto branch_ku; case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ + case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */ mask = 0xc000; /* jle */ goto branch_ku; case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ + case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */ mask = 0x7000; /* jne */ goto branch_ku; case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == imm) */ + case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */ mask = 0x8000; /* je */ goto branch_ku; case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */ + case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */ mask = 0x7000; /* jnz */ - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* ngr %w1,%dst */ - EMIT4(0xb9800000, REG_W1, dst_reg); + if (BPF_CLASS(insn->code) == BPF_JMP32) { + /* llilf %w1,imm (load zero extend imm) */ + EMIT6_IMM(0xc0010000, REG_W1, imm); + /* nr %w1,%dst */ + EMIT2(0x1400, REG_W1, dst_reg); + } else { + /* lgfi %w1,imm (load sign extend imm) */ + EMIT6_IMM(0xc0010000, REG_W1, imm); + /* ngr %w1,%dst */ + EMIT4(0xb9800000, REG_W1, dst_reg); + } goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ + case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */ mask = 0x2000; /* jh */ goto branch_xs; case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ + case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */ mask = 0x4000; /* jl */ goto branch_xs; case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ + case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */ mask = 0xa000; /* jhe */ goto branch_xs; case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ + case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */ mask = 0xc000; /* jle */ 
goto branch_xs; case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ + case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */ mask = 0x2000; /* jh */ goto branch_xu; case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ + case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */ mask = 0x4000; /* jl */ goto branch_xu; case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ + case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */ mask = 0xa000; /* jhe */ goto branch_xu; case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ + case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */ mask = 0xc000; /* jle */ goto branch_xu; case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ + case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */ mask = 0x7000; /* jne */ goto branch_xu; case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */ + case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */ mask = 0x8000; /* je */ goto branch_xu; case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */ + case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */ + { + bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + mask = 0x7000; /* jnz */ - /* ngrk %w1,%dst,%src */ - EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg); + /* nrk or ngrk %w1,%dst,%src */ + EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000), + REG_W1, dst_reg, src_reg); goto branch_oc; branch_ks: /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* cgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask); + /* crj or cgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, REG_W1, i, off, mask); break; branch_ku: /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* clgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask); + /* clrj or clgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), + dst_reg, REG_W1, i, off, mask); break; branch_xs: - /* cgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask); + /* crj or cgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, src_reg, i, off, mask); break; branch_xu: - /* clgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask); + /* clrj or clgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), + dst_reg, src_reg, i, off, mask); break; branch_oc: /* brc mask,jmp_off (branch instruction needs 4 bytes) */ jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); break; + } default: /* too complex, give up */ pr_err("Unknown opcode %02x\n", insn->code); return -1; From 461448398ac104b014530680dbac09e577cd2695 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:12 -0500 Subject: [PATCH 58/68] nfp: bpf: implement jitting of JMP32 This patch implements code-gen for new JMP32 instructions on NFP. 
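NFP registers are 32-bit and a single eBPF register occupies the pair
2N/2N+1, so the JMP32 cases mostly reuse the existing 64-bit handlers
with the high-word step gated behind the new is_mbpf_jmp64() helper. The
"dst != 0" fast path of jne_imm() shows the pattern; as a plain-C sketch
of the tested condition (jne_zero_taken() is an illustrative name):

    static bool jne_zero_taken(u32 lo, u32 hi, bool is_jmp32)
    {
        /* JMP: or lo,hi and branch if != 0; JMP32: test lo alone */
        return is_jmp32 ? lo != 0 : (lo | hi) != 0;
    }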
Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 97 ++++++++++++++----- drivers/net/ethernet/netronome/nfp/bpf/main.h | 22 ++++- 2 files changed, 93 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 3308fd92c017..aa3a2098a583 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1334,8 +1334,9 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, insn->src_reg * 2, br_mask, insn->off); - wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, - insn->src_reg * 2 + 1, br_mask, insn->off); + if (is_mbpf_jmp64(meta)) + wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, + insn->src_reg * 2 + 1, br_mask, insn->off); return 0; } @@ -1390,13 +1391,15 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) else emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); - tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); - if (!code->swap) - emit_alu(nfp_prog, reg_none(), - reg_a(reg + 1), carry_op, tmp_reg); - else - emit_alu(nfp_prog, reg_none(), - tmp_reg, carry_op, reg_a(reg + 1)); + if (is_mbpf_jmp64(meta)) { + tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); + if (!code->swap) + emit_alu(nfp_prog, reg_none(), + reg_a(reg + 1), carry_op, tmp_reg); + else + emit_alu(nfp_prog, reg_none(), + tmp_reg, carry_op, reg_a(reg + 1)); + } emit_br(nfp_prog, code->br_mask, insn->off, 0); @@ -1423,8 +1426,9 @@ static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); - emit_alu(nfp_prog, reg_none(), - reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); + if (is_mbpf_jmp64(meta)) + emit_alu(nfp_prog, reg_none(), + reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; @@ -3048,6 +3052,19 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + swreg tmp_reg; + + tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog)); + emit_alu(nfp_prog, reg_none(), + reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); + emit_br(nfp_prog, BR_BEQ, insn->off, 0); + + return 0; +} + static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -3061,9 +3078,10 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* Upper word of the mask can only be 0 or ~0 from sign extension, * so either ignore it or OR the whole thing in. 
*/ - if (imm >> 32) + if (is_mbpf_jmp64(meta) && imm >> 32) { emit_alu(nfp_prog, reg_none(), reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); + } emit_br(nfp_prog, BR_BNE, insn->off, 0); return 0; @@ -3073,11 +3091,16 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ + bool is_jmp32 = is_mbpf_jmp32(meta); swreg tmp_reg; if (!imm) { - emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), - ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); + if (is_jmp32) + emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE, + reg_b(insn->dst_reg * 2)); + else + emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), + ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); emit_br(nfp_prog, BR_BNE, insn->off, 0); return 0; } @@ -3087,6 +3110,9 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); emit_br(nfp_prog, BR_BNE, insn->off, 0); + if (is_jmp32) + return 0; + tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); @@ -3101,10 +3127,13 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), ALU_OP_XOR, reg_b(insn->src_reg * 2)); - emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), - ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); - emit_alu(nfp_prog, reg_none(), - imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); + if (is_mbpf_jmp64(meta)) { + emit_alu(nfp_prog, imm_b(nfp_prog), + reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, + reg_b(insn->src_reg * 2 + 1)); + emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, + imm_b(nfp_prog)); + } emit_br(nfp_prog, BR_BEQ, insn->off, 0); return 0; @@ -3369,6 +3398,28 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, + [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm, + [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JSGT | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSGE | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSLT | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSLE | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSET | BPF_K] =jset_imm, + [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm, + [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg, + [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JSGT | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSGE | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSLT | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSLE | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSET | BPF_X] =jset_reg, + [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, [BPF_JMP | BPF_EXIT] = jmp_exit, }; @@ -3397,7 +3448,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) list_for_each_entry(meta, &nfp_prog->insns, l) { if (meta->flags & FLAG_INSN_SKIP_MASK) continue; - if (BPF_CLASS(meta->insn.code) != BPF_JMP) + if (!is_mbpf_jmp(meta)) continue; if (meta->insn.code == (BPF_JMP | BPF_EXIT) && !nfp_is_main_function(meta)) @@ -3758,16 +3809,14 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) if (meta->flags & FLAG_INSN_SKIP_MASK) continue; - if (BPF_CLASS(insn.code) != 
BPF_ALU && - BPF_CLASS(insn.code) != BPF_ALU64 && - BPF_CLASS(insn.code) != BPF_JMP) + if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta)) continue; if (BPF_SRC(insn.code) != BPF_K) continue; if (insn.imm >= 0) continue; - if (BPF_CLASS(insn.code) == BPF_JMP) { + if (is_mbpf_jmp(meta)) { switch (BPF_OP(insn.code)) { case BPF_JGE: case BPF_JSGE: @@ -4338,7 +4387,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog) unsigned int dst_idx; bool pseudo_call; - if (BPF_CLASS(code) != BPF_JMP) + if (!is_mbpf_jmp(meta)) continue; if (BPF_OP(code) == BPF_EXIT) continue; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 5813c3e13ebe..b25a48218bcf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -365,6 +365,21 @@ static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); } +static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_JMP32; +} + +static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_JMP; +} + +static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta) +{ + return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta); +} + static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) { return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM); @@ -419,10 +434,13 @@ static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta) { u8 op; - if (BPF_CLASS(meta->insn.code) != BPF_JMP) + if (is_mbpf_jmp32(meta)) + return true; + + if (!is_mbpf_jmp64(meta)) return false; - op = BPF_OP(meta->insn.code); + op = mbpf_op(meta); return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; } From 6ea848b5ced53cbfd5677ff176c2aca10fd61bf4 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:13 -0500 Subject: [PATCH 59/68] selftests: bpf: functional and min/max reasoning unit tests for JMP32 This patch adds unit tests for new JMP32 instructions. This patch also added the new BPF_JMP32_REG and BPF_JMP32_IMM macros to samples/bpf/bpf_insn.h so that JMP32 insn builders are available to tests under 'samples' directory. Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- samples/bpf/bpf_insn.h | 20 + tools/testing/selftests/bpf/test_verifier.c | 786 +++++++++++++++++++- 2 files changed, 782 insertions(+), 24 deletions(-) diff --git a/samples/bpf/bpf_insn.h b/samples/bpf/bpf_insn.h index 20dc5cefec84..544237980582 100644 --- a/samples/bpf/bpf_insn.h +++ b/samples/bpf/bpf_insn.h @@ -164,6 +164,16 @@ struct bpf_insn; .off = OFF, \ .imm = 0 }) +/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ @@ -174,6 +184,16 @@ struct bpf_insn; .off = OFF, \ .imm = IMM }) +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
*/ + +#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + /* Raw code statement block */ #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index e4fef6ca8071..1df757645a9a 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -211,6 +211,42 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) BPF_MOV64_IMM(BPF_REG_5, 0), \ BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp) +/* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return + * value into 0 and does necessary preparation for direct packet access + * through r2. The allowed access range is 8 bytes. + */ +#define BPF_DIRECT_PKT_R2 \ + BPF_MOV64_IMM(BPF_REG_0, 0), \ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ + offsetof(struct __sk_buff, data)), \ + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \ + offsetof(struct __sk_buff, data_end)), \ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \ + BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \ + BPF_EXIT_INSN() + +/* BPF_RAND_UEXT_R7 contains 4 instructions, it initializes R7 into a random + * positive u32, and zero-extend it into 64-bit. + */ +#define BPF_RAND_UEXT_R7 \ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \ + BPF_FUNC_get_prandom_u32), \ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33) + +/* BPF_RAND_SEXT_R7 contains 5 instructions, it initializes R7 into a random + * negative u32, and sign-extend it into 64-bit. + */ +#define BPF_RAND_SEXT_R7 \ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \ + BPF_FUNC_get_prandom_u32), \ + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \ + BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \ + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32) + static struct bpf_test tests[] = { { "add+sub+mul", @@ -15447,18 +15483,7 @@ static struct bpf_test tests[] = { { "jset: functional", .insns = { - /* r0 = 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - /* prep for direct packet access via r2 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), - BPF_EXIT_INSN(), - + BPF_DIRECT_PKT_R2, BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), /* reg, bit 63 or bit 0 set, taken */ @@ -15514,18 +15539,7 @@ static struct bpf_test tests[] = { { "jset: sign-extend", .insns = { - /* r0 = 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - /* prep for direct packet access via r2 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), - BPF_EXIT_INSN(), - + BPF_DIRECT_PKT_R2, BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1), @@ -15795,6 +15809,730 @@ static struct bpf_test tests[] = { .result = ACCEPT, .retval = 2, }, + { + "jset32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + /* reg, high bits shouldn't be tested */ + 
BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + + BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1ULL << 63, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, + }, + { + "jset32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000), + BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), + BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1ULL << 63, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, + }, + { + "jset32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "jeq32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 2, + .retvals = { + { .retval = 0, + .data64 = { -2, } + }, + { .retval = 2, + .data64 = { -1, } + }, + }, + }, + { + "jeq32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001), + BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 2, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, + }, + { + "jeq32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "jne32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 2, + .retvals = { + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 0, + .data64 = { -1, } + }, + }, + }, + { + "jne32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), + BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 2, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 2, } 
+ }, + }, + }, + { + "jne32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "jge32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { 0, } + }, + }, + }, + { + "jge32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { INT_MAX, } + }, + { .retval = 0, + .data64 = { (UINT_MAX - 1) | 2ULL << 32, } + }, + }, + }, + { + "jge32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jgt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { 0, } + }, + }, + }, + { + "jgt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { (UINT_MAX - 1) | 2ULL << 32, } + }, + }, + }, + { + "jgt32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jle32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { INT_MAX - 1, } + }, + { 
.retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { INT_MAX, } + }, + }, + }, + { + "jle32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX | 1ULL << 32, } + }, + { .retval = 2, + .data64 = { INT_MAX - 2, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + }, + }, + { + "jle32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jlt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { INT_MAX - 1, } + }, + }, + }, + { + "jlt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX | 1ULL << 32, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { (INT_MAX - 1) | 3ULL << 32, } + }, + }, + }, + { + "jlt32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jsge32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { 0, } + }, + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 0, + .data64 = { -2, } + }, + }, + }, + { + "jsge32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 2, + .data64 = { 0x7fffffff | 1ULL << 32, } + }, + { .retval = 0, + .data64 = { -2, } + }, + }, + }, + { + "jsge32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + 
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jsgt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { (__u32)-2, } + }, + { .retval = 0, + .data64 = { -1, } + }, + { .retval = 2, + .data64 = { 1, } + }, + }, + }, + { + "jsgt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 0x7ffffffe, } + }, + { .retval = 0, + .data64 = { 0x1ffffffffULL, } + }, + { .retval = 2, + .data64 = { 0x7fffffff, } + }, + }, + }, + { + "jsgt32: min/max deduction", + .insns = { + BPF_RAND_SEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32), + BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jsle32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { (__u32)-2, } + }, + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 0, + .data64 = { 1, } + }, + }, + }, + { + "jsle32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { 0x7ffffffe, } + }, + { .retval = 2, + .data64 = { (__u32)-1, } + }, + { .retval = 0, + .data64 = { 0x7fffffff | 2ULL << 32, } + }, + }, + }, + { + "jsle32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jslt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { (__u32)-2, } + }, + { .retval = 0, + .data64 = { -1, } + }, + { .retval = 
0, + .data64 = { 1, } + }, + }, + }, + { + "jslt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { 0x7ffffffe, } + }, + { .retval = 2, + .data64 = { 0xffffffff, } + }, + { .retval = 0, + .data64 = { 0x7fffffff | 2ULL << 32, } + }, + }, + }, + { + "jslt32: min/max deduction", + .insns = { + BPF_RAND_SEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32), + BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, }; static int probe_filter_length(const struct bpf_insn *fp) From 3ef84346c56141c72a28efcad2ddf17809533832 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Sat, 26 Jan 2019 12:26:14 -0500 Subject: [PATCH 60/68] selftests: bpf: makefile support sub-register code-gen test mode This patch enables testing some eBPF programs under sub-register compilation mode. Only enable this when there is BPF_JMP32 support on both LLVM and kernel. This is because only after BPF_JMP32 added, code-gen for complex program under sub-register mode will be clean enough to pass verification. This patch splits TEST_GEN_FILES into BPF_OBJ_FILES and BPF_OBJ_FILES_DUAL_COMPILE. The latter are those objects we would like to compile for both default and sub-register mode. They are also objects used by "test_progs". Reviewed-by: Jakub Kicinski Signed-off-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 95 ++++++++++++++++++++-------- 1 file changed, 70 insertions(+), 25 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1cca5f2567c2..d61b023c8ddc 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -10,15 +10,14 @@ ifneq ($(wildcard $(GENHDR)),) GENFLAGS := -DHAVE_GENHDR endif +CLANG ?= clang +LLC ?= llc +LLVM_OBJCOPY ?= llvm-objcopy +LLVM_READELF ?= llvm-readelf +BTF_PAHOLE ?= pahole CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(BPFDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include LDLIBS += -lcap -lelf -lrt -lpthread -TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read -all: $(TEST_CUSTOM_PROGS) - -$(TEST_CUSTOM_PROGS): $(OUTPUT)/%: %.c - $(CC) -o $(TEST_CUSTOM_PROGS) -static $< -Wl,--build-id - # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ @@ -26,21 +25,42 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \ test_netcnt test_tcpnotify_user -TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ - test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ - sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \ - test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \ - test_tcpnotify_kern.o \ - sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \ - sockmap_tcp_msg_prog.o 
connect4_prog.o connect6_prog.o test_adjust_tail.o \ - test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \ - test_get_stack_rawtp.o test_sockmap_kern.o test_sockhash_kern.o \ - test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ +BPF_OBJ_FILES = \ + test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ + sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o \ + test_tcpnotify_kern.o sample_map_ret0.o test_tcpbpf_kern.o \ + sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o \ + test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o \ + test_tunnel_kern.o test_sockhash_kern.o test_lwt_seg6local.o \ + sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \ get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \ - test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \ - test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \ + test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o test_xdp_vlan.o \ + xdp_dummy.o test_map_in_map.o +# Objects are built with default compilation flags and with sub-register +# code-gen enabled. +BPF_OBJ_FILES_DUAL_COMPILE = \ + test_pkt_access.o test_xdp.o test_adjust_tail.o \ + test_l4lb.o test_l4lb_noinline.o test_xdp_noinline.o test_tcp_estats.o \ + test_obj_id.o test_pkt_md_access.o test_tracepoint.o \ + test_stacktrace_map.o test_stacktrace_build_id.o \ + test_get_stack_rawtp.o test_sk_lookup_kern.o \ + test_queue_map.o test_stack_map.o + +TEST_GEN_FILES = $(BPF_OBJ_FILES) $(BPF_OBJ_FILES_DUAL_COMPILE) + +# Also test sub-register code-gen if LLVM + kernel both have eBPF v3 processor +# support, which is the first version to contain both ALU32 and JMP32 +# instructions. 
+SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ + $(CLANG) -target bpf -O2 -emit-llvm -S -x c - -o - | \ + $(LLC) -mattr=+alu32 -mcpu=probe 2>&1 | \ + grep 'if w') +ifneq ($(SUBREG_CODEGEN),) +TEST_GEN_FILES += $(patsubst %.o,alu32/%.o, $(BPF_OBJ_FILES_DUAL_COMPILE)) +endif + # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ test_libbpf.sh \ @@ -65,6 +85,13 @@ TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_use include ../lib.mk +# NOTE: $(OUTPUT) won't get default value if used before lib.mk +TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read +all: $(TEST_CUSTOM_PROGS) + +$(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c + $(CC) -o $@ -static $< -Wl,--build-id + BPFOBJ := $(OUTPUT)/libbpf.a $(TEST_GEN_PROGS): $(BPFOBJ) @@ -92,12 +119,6 @@ force: $(BPFOBJ): force $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ -CLANG ?= clang -LLC ?= llc -LLVM_OBJCOPY ?= llvm-objcopy -LLVM_READELF ?= llvm-readelf -BTF_PAHOLE ?= pahole - PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) # Let newer LLVM versions transparently probe the kernel for availability @@ -149,6 +170,30 @@ endif endif endif +ifneq ($(SUBREG_CODEGEN),) +ALU32_BUILD_DIR = $(OUTPUT)/alu32 +TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32 +$(ALU32_BUILD_DIR): + mkdir -p $@ + +$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read + cp $< $@ + +$(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(ALU32_BUILD_DIR) \ + $(ALU32_BUILD_DIR)/urandom_read + $(CC) $(CFLAGS) -o $(ALU32_BUILD_DIR)/test_progs_32 $< \ + trace_helpers.c $(OUTPUT)/libbpf.a $(LDLIBS) + +$(ALU32_BUILD_DIR)/%.o: %.c $(ALU32_BUILD_DIR) $(ALU32_BUILD_DIR)/test_progs_32 + $(CLANG) $(CLANG_FLAGS) \ + -O2 -target bpf -emit-llvm -c $< -o - | \ + $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \ + -filetype=obj -o $@ +ifeq ($(DWARF2BTF),y) + $(BTF_PAHOLE) -J $@ +endif +endif + # Have one program compiled without "-target bpf" to test whether libbpf loads # it successfully $(OUTPUT)/test_xdp.o: test_xdp.c @@ -167,4 +212,4 @@ ifeq ($(DWARF2BTF),y) $(BTF_PAHOLE) -J $@ endif -EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) +EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) From 2dfb40121ee83139909e4e17d414eee87897bb8e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 Jan 2019 15:24:42 -0800 Subject: [PATCH 61/68] selftests: bpf: prepare for break up of verifier tests test_verifier.c has grown to be very long (almost 16 kLoC), and it is very conflict prone since we always add tests at the end. Try to break it apart a little bit. Allow test snippets to be defined in separate files and include them automatically into the huge test array. 
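As an illustrative sketch (the file name and test below are hypothetical; only the mechanism is from this patch): each snippet file under verifier/ holds bare struct bpf_test initializer entries, with no array declaration or includes of its own, e.g.

  /* verifier/example.c -- hypothetical snippet file; entries are
   * pasted verbatim into the tests[] initializer in test_verifier.c.
   */
  {
  	"example: mov64 and exit",
  	.insns = {
  	BPF_MOV64_IMM(BPF_REG_0, 0),
  	BPF_EXIT_INSN(),
  	},
  	.result = ACCEPT,
  	.retval = 0,
  },

test_verifier.c then splices every snippet into the top of the array through the generated header:

  static struct bpf_test tests[] = {
  #define FILL_ARRAY
  #include <verifier/tests.h>	/* generated list of #include "x.c" lines */
  #undef FILL_ARRAY
  	/* remaining inline test cases ... */
  };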
Signed-off-by: Jakub Kicinski Acked-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 13 +++++++++++++ tools/testing/selftests/bpf/test_verifier.c | 3 +++ tools/testing/selftests/bpf/verifier/.gitignore | 1 + 3 files changed, 17 insertions(+) create mode 100644 tools/testing/selftests/bpf/verifier/.gitignore diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index d61b023c8ddc..89b0d1799ff3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -212,4 +212,17 @@ ifeq ($(DWARF2BTF),y) $(BTF_PAHOLE) -J $@ endif +$(OUTPUT)/test_verifier: $(OUTPUT)/verifier/tests.h +$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT) + +VERIFIER_TEST_FILES := $(wildcard verifier/*.c) +$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES) + $(shell ( cd verifier/ + echo '/* Generated header, do not edit */'; \ + echo '#ifdef FILL_ARRAY'; \ + ls *.c 2> /dev/null | \ + sed -e 's@\(.*\)@#include \"\1\"@'; \ + echo '#endif' \ + ) > $(OUTPUT)/verifier/tests.h) + EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 1df757645a9a..0a1847c3d1a3 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -248,6 +248,9 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32) static struct bpf_test tests[] = { +#define FILL_ARRAY +#include <verifier/tests.h> +#undef FILL_ARRAY { "add+sub+mul", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/.gitignore b/tools/testing/selftests/bpf/verifier/.gitignore new file mode 100644 index 000000000000..45984a364647 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/.gitignore @@ -0,0 +1 @@ +tests.h From 40f2fbd5a5e9c6d0799632fcba174a7b45c471da Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 Jan 2019 15:24:43 -0800 Subject: [PATCH 62/68] selftests: bpf: break up test_verifier Break the first 10 kLoC of test_verifier test cases out into smaller files. Looks like git line counting gets a little flimsy above 16 bit integers, so we need two commits to break up test_verifier. 
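For reference, the header generated by the Makefile rule from the previous patch is just an include list wrapped in the FILL_ARRAY guard; after this split it would look roughly like the following (abbreviated sketch, one line per file under verifier/):

  /* Generated header, do not edit */
  #ifdef FILL_ARRAY
  #include "and.c"
  #include "array_access.c"
  #include "basic.c"
  /* ... */
  #include "var_off.c"
  #endif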
Signed-off-by: Jakub Kicinski Acked-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_verifier.c | 10136 ---------------- tools/testing/selftests/bpf/verifier/and.c | 50 + .../selftests/bpf/verifier/array_access.c | 219 + tools/testing/selftests/bpf/verifier/basic.c | 23 + .../selftests/bpf/verifier/basic_call.c | 50 + .../selftests/bpf/verifier/basic_instr.c | 92 + .../selftests/bpf/verifier/basic_stack.c | 64 + .../selftests/bpf/verifier/basic_stx_ldx.c | 45 + tools/testing/selftests/bpf/verifier/bounds.c | 508 + .../bpf/verifier/bounds_mix_sign_unsign.c | 406 + tools/testing/selftests/bpf/verifier/cfg.c | 70 + .../selftests/bpf/verifier/cgroup_skb.c | 197 + .../selftests/bpf/verifier/cgroup_storage.c | 220 + .../testing/selftests/bpf/verifier/const_or.c | 60 + tools/testing/selftests/bpf/verifier/ctx.c | 23 + .../selftests/bpf/verifier/ctx_sk_msg.c | 180 + .../testing/selftests/bpf/verifier/ctx_skb.c | 1001 ++ .../bpf/verifier/direct_packet_access.c | 619 + .../verifier/direct_stack_access_wraparound.c | 40 + tools/testing/selftests/bpf/verifier/div0.c | 184 + .../selftests/bpf/verifier/div_overflow.c | 104 + .../bpf/verifier/helper_access_var_len.c | 614 + .../bpf/verifier/helper_packet_access.c | 460 + .../bpf/verifier/helper_value_access.c | 953 ++ tools/testing/selftests/bpf/verifier/jump.c | 180 + .../selftests/bpf/verifier/junk_insn.c | 45 + tools/testing/selftests/bpf/verifier/ld_abs.c | 95 + .../testing/selftests/bpf/verifier/ld_imm64.c | 141 + tools/testing/selftests/bpf/verifier/ld_ind.c | 72 + .../testing/selftests/bpf/verifier/leak_ptr.c | 67 + tools/testing/selftests/bpf/verifier/lwt.c | 189 + .../selftests/bpf/verifier/map_in_map.c | 62 + .../selftests/bpf/verifier/map_ret_val.c | 65 + .../bpf/verifier/perf_event_sample_period.c | 59 + .../bpf/verifier/prevent_map_lookup.c | 74 + .../selftests/bpf/verifier/raw_stack.c | 305 + .../selftests/bpf/verifier/runtime_jit.c | 80 + .../selftests/bpf/verifier/search_pruning.c | 83 + .../selftests/bpf/verifier/spill_fill.c | 75 + .../selftests/bpf/verifier/stack_ptr.c | 317 + tools/testing/selftests/bpf/verifier/uninit.c | 39 + tools/testing/selftests/bpf/verifier/unpriv.c | 521 + tools/testing/selftests/bpf/verifier/value.c | 104 + .../selftests/bpf/verifier/value_adj_spill.c | 43 + .../bpf/verifier/value_illegal_alu.c | 94 + .../selftests/bpf/verifier/value_or_null.c | 152 + .../selftests/bpf/verifier/value_ptr_arith.c | 792 ++ .../testing/selftests/bpf/verifier/var_off.c | 66 + 48 files changed, 9902 insertions(+), 10136 deletions(-) create mode 100644 tools/testing/selftests/bpf/verifier/and.c create mode 100644 tools/testing/selftests/bpf/verifier/array_access.c create mode 100644 tools/testing/selftests/bpf/verifier/basic.c create mode 100644 tools/testing/selftests/bpf/verifier/basic_call.c create mode 100644 tools/testing/selftests/bpf/verifier/basic_instr.c create mode 100644 tools/testing/selftests/bpf/verifier/basic_stack.c create mode 100644 tools/testing/selftests/bpf/verifier/basic_stx_ldx.c create mode 100644 tools/testing/selftests/bpf/verifier/bounds.c create mode 100644 tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c create mode 100644 tools/testing/selftests/bpf/verifier/cfg.c create mode 100644 tools/testing/selftests/bpf/verifier/cgroup_skb.c create mode 100644 tools/testing/selftests/bpf/verifier/cgroup_storage.c create mode 100644 tools/testing/selftests/bpf/verifier/const_or.c create mode 100644 tools/testing/selftests/bpf/verifier/ctx.c create mode 100644 
tools/testing/selftests/bpf/verifier/ctx_sk_msg.c create mode 100644 tools/testing/selftests/bpf/verifier/ctx_skb.c create mode 100644 tools/testing/selftests/bpf/verifier/direct_packet_access.c create mode 100644 tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c create mode 100644 tools/testing/selftests/bpf/verifier/div0.c create mode 100644 tools/testing/selftests/bpf/verifier/div_overflow.c create mode 100644 tools/testing/selftests/bpf/verifier/helper_access_var_len.c create mode 100644 tools/testing/selftests/bpf/verifier/helper_packet_access.c create mode 100644 tools/testing/selftests/bpf/verifier/helper_value_access.c create mode 100644 tools/testing/selftests/bpf/verifier/jump.c create mode 100644 tools/testing/selftests/bpf/verifier/junk_insn.c create mode 100644 tools/testing/selftests/bpf/verifier/ld_abs.c create mode 100644 tools/testing/selftests/bpf/verifier/ld_imm64.c create mode 100644 tools/testing/selftests/bpf/verifier/ld_ind.c create mode 100644 tools/testing/selftests/bpf/verifier/leak_ptr.c create mode 100644 tools/testing/selftests/bpf/verifier/lwt.c create mode 100644 tools/testing/selftests/bpf/verifier/map_in_map.c create mode 100644 tools/testing/selftests/bpf/verifier/map_ret_val.c create mode 100644 tools/testing/selftests/bpf/verifier/perf_event_sample_period.c create mode 100644 tools/testing/selftests/bpf/verifier/prevent_map_lookup.c create mode 100644 tools/testing/selftests/bpf/verifier/raw_stack.c create mode 100644 tools/testing/selftests/bpf/verifier/runtime_jit.c create mode 100644 tools/testing/selftests/bpf/verifier/search_pruning.c create mode 100644 tools/testing/selftests/bpf/verifier/spill_fill.c create mode 100644 tools/testing/selftests/bpf/verifier/stack_ptr.c create mode 100644 tools/testing/selftests/bpf/verifier/uninit.c create mode 100644 tools/testing/selftests/bpf/verifier/unpriv.c create mode 100644 tools/testing/selftests/bpf/verifier/value.c create mode 100644 tools/testing/selftests/bpf/verifier/value_adj_spill.c create mode 100644 tools/testing/selftests/bpf/verifier/value_illegal_alu.c create mode 100644 tools/testing/selftests/bpf/verifier/value_or_null.c create mode 100644 tools/testing/selftests/bpf/verifier/value_ptr_arith.c create mode 100644 tools/testing/selftests/bpf/verifier/var_off.c diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 0a1847c3d1a3..fcdb7e2d2ec0 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -251,10142 +251,6 @@ static struct bpf_test tests[] = { #define FILL_ARRAY #include <verifier/tests.h> #undef FILL_ARRAY - { - "add+sub+mul", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_2, 3), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -3, - }, - { - "DIV32 by 0, zero check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "DIV32 by 0, zero check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - 
}, - { - "DIV64 by 0, zero check", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "MOD32 by 0, zero check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "MOD32 by 0, zero check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "MOD64 by 0, zero check", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "DIV32 by 0, zero check ok, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 2), - BPF_MOV32_IMM(BPF_REG_2, 16), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 8, - }, - { - "DIV32 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "DIV32 by 0, zero check 2, cls", - .insns = { - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "DIV64 by 0, zero check, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "MOD32 by 0, zero check ok, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 3), - BPF_MOV32_IMM(BPF_REG_2, 5), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2, - }, - { - "MOD32 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "MOD32 by 0, zero check 2, cls", - .insns = { - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "MOD64 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 2), - BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2, - }, - { - "MOD64 by 0, zero check 2, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, -1), - BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = -1, - }, - /* Just make sure that JITs used udiv/umod as otherwise we get - * an exception from INT_MIN/-1 overflow similarly as with div - * by zero. - */ - { - "DIV32 overflow, check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "DIV32 overflow, check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "DIV64 overflow, check 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), - BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "DIV64 overflow, check 2", - .insns = { - BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "MOD32 overflow, check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = INT_MIN, - }, - { - "MOD32 overflow, check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = INT_MIN, - }, - { - "MOD64 overflow, check 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "MOD64 overflow, check 2", - .insns = { - BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "xor32 zero extend check", - .insns = { - BPF_MOV32_IMM(BPF_REG_2, -1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32), - BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff), - BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2), - BPF_MOV32_IMM(BPF_REG_0, 2), - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "empty prog", - .insns = { - }, - .errstr = "unknown opcode 00", - .result = REJECT, - }, - { - "only exit insn", - .insns = { - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, - }, - { - "unreachable", - .insns = { - BPF_EXIT_INSN(), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable", - .result = REJECT, - }, - { - "unreachable2", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable", - .result = REJECT, - }, - { - "out of range jump", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "jump 
out of range", - .result = REJECT, - }, - { - "out of range jump2", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, -2), - BPF_EXIT_INSN(), - }, - .errstr = "jump out of range", - .result = REJECT, - }, - { - "test1 ld_imm64", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr = "invalid BPF_LD_IMM insn", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "test2 ld_imm64", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid BPF_LD_IMM insn", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "test3 ld_imm64", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_LD_IMM64(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test4 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test5 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test6 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), - BPF_RAW_INSN(0, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "test7 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), - BPF_RAW_INSN(0, 0, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 1, - }, - { - "test8 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1), - BPF_RAW_INSN(0, 0, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "uses reserved fields", - .result = REJECT, - }, - { - "test9 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), - BPF_RAW_INSN(0, 0, 0, 1, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test10 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), - BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test11 ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), - BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "test12 ld_imm64", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), - BPF_RAW_INSN(0, 0, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "not pointing to valid bpf_map", - .result = REJECT, - }, - { - "test13 ld_imm64", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), - BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_ld_imm64 insn", - .result = REJECT, - }, - { - "arsh32 on imm", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "arsh32 on imm 2", - .insns = { - BPF_LD_IMM64(BPF_REG_0, 
0x1122334485667788), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -16069393, - }, - { - "arsh32 on reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "arsh32 on reg 2", - .insns = { - BPF_LD_IMM64(BPF_REG_0, 0xffff55667788), - BPF_MOV64_IMM(BPF_REG_1, 15), - BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 43724, - }, - { - "arsh64 on imm", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "arsh64 on reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "no bpf_exit", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), - }, - .errstr = "not an exit", - .result = REJECT, - }, - { - "loop (back-edge)", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, -1), - BPF_EXIT_INSN(), - }, - .errstr = "back-edge", - .result = REJECT, - }, - { - "loop2 (back-edge)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - BPF_EXIT_INSN(), - }, - .errstr = "back-edge", - .result = REJECT, - }, - { - "conditional loop", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), - BPF_EXIT_INSN(), - }, - .errstr = "back-edge", - .result = REJECT, - }, - { - "read uninitialized register", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R2 !read_ok", - .result = REJECT, - }, - { - "read invalid register", - .insns = { - BPF_MOV64_REG(BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr = "R15 is invalid", - .result = REJECT, - }, - { - "program doesn't init R0 before exit", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, - }, - { - "program doesn't init R0 before exit in all branches", - .insns = { - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "stack out of bounds", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack", - .result = REJECT, - }, - { - "invalid call insn1", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown opcode 8d", - .result = REJECT, - }, - { - "invalid call insn2", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_CALL uses reserved", - .result = REJECT, - }, - { - "invalid function call", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567), - BPF_EXIT_INSN(), - }, - .errstr = "invalid func unknown#1234567", - .result = REJECT, - }, - { - "uninitialized stack1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 2 }, - .errstr 
= "invalid indirect read from stack", - .result = REJECT, - }, - { - "uninitialized stack2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), - BPF_EXIT_INSN(), - }, - .errstr = "invalid read from stack", - .result = REJECT, - }, - { - "invalid fp arithmetic", - /* If this gets ever changed, make sure JITs can deal with it. */ - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 subtraction from stack pointer", - .result = REJECT, - }, - { - "non-invalid fp arithmetic", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "invalid argument register", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_cgroup_classid), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_cgroup_classid), - BPF_EXIT_INSN(), - }, - .errstr = "R1 !read_ok", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "non-invalid argument register", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_cgroup_classid), - BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_cgroup_classid), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "check valid spill/fill", - .insns = { - /* spill R1(ctx) into stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - /* fill it back into R2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), - /* should be able to access R0 = *(R2 + 8) */ - /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .retval = POINTER_VALUE, - }, - { - "check valid spill/fill, skb mark", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = ACCEPT, - }, - { - "check corrupted spill/fill", - .insns = { - /* spill R1(ctx) into stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - /* mess up with R1 pointer on stack */ - BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), - /* fill back into R0 is fine for priv. - * R0 now becomes SCALAR_VALUE. - */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - /* Load from R0 should fail. 
*/ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .errstr = "R0 invalid mem access 'inv", - .result = REJECT, - }, - { - "check corrupted spill/fill, LSB", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = POINTER_VALUE, - }, - { - "check corrupted spill/fill, MSB", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = POINTER_VALUE, - }, - { - "invalid src register in STX", - .insns = { - BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), - BPF_EXIT_INSN(), - }, - .errstr = "R15 is invalid", - .result = REJECT, - }, - { - "invalid dst register in STX", - .insns = { - BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1), - BPF_EXIT_INSN(), - }, - .errstr = "R14 is invalid", - .result = REJECT, - }, - { - "invalid dst register in ST", - .insns = { - BPF_ST_MEM(BPF_B, 14, -1, -1), - BPF_EXIT_INSN(), - }, - .errstr = "R14 is invalid", - .result = REJECT, - }, - { - "invalid src register in LDX", - .insns = { - BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R12 is invalid", - .result = REJECT, - }, - { - "invalid dst register in LDX", - .insns = { - BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R11 is invalid", - .result = REJECT, - }, - { - "junk insn", - .insns = { - BPF_RAW_INSN(0, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown opcode 00", - .result = REJECT, - }, - { - "junk insn2", - .insns = { - BPF_RAW_INSN(1, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_LDX uses reserved fields", - .result = REJECT, - }, - { - "junk insn3", - .insns = { - BPF_RAW_INSN(-1, 0, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown opcode ff", - .result = REJECT, - }, - { - "junk insn4", - .insns = { - BPF_RAW_INSN(-1, -1, -1, -1, -1), - BPF_EXIT_INSN(), - }, - .errstr = "unknown opcode ff", - .result = REJECT, - }, - { - "junk insn5", - .insns = { - BPF_RAW_INSN(0x7f, -1, -1, -1, -1), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_ALU uses reserved fields", - .result = REJECT, - }, - { - "misaligned read from stack", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned stack access", - .result = REJECT, - }, - { - "invalid map_fd for function call", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_delete_elem), - BPF_EXIT_INSN(), - }, - .errstr = "fd 0 is not pointing to valid bpf_map", - .result = REJECT, - }, - { - "don't check return value before access", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 invalid mem access 
'map_value_or_null'", - .result = REJECT, - }, - { - "access memory with incorrect alignment", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "misaligned value access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "sometimes access memory with incorrect alignment", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 invalid mem access", - .errstr_unpriv = "R0 leaks addr", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "jump test 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "jump test 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 14), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 11), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 5), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "jump test 3", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 19), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_JMP_IMM(BPF_JA, 0, 0, 15), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32), - BPF_JMP_IMM(BPF_JA, 0, 0, 11), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40), - BPF_JMP_IMM(BPF_JA, 0, 0, 7), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_delete_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 24 }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = -ENOENT, - }, - { - "jump test 4", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "jump test 5", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), 
- BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "access skb fields ok", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, pkt_type)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, queue_mapping)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, protocol)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, vlan_present)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, vlan_tci)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, napi_id)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "access skb fields bad1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "access skb fields bad2", - .insns = { - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, pkt_type)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "different pointers", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "access skb fields bad3", - .insns = { - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, pkt_type)), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JA, 0, 0, -12), - }, - .fixup_map_hash_8b = { 6 }, - .errstr = "different pointers", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "access skb fields bad4", - .insns = { - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - 
BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JA, 0, 0, -13), - }, - .fixup_map_hash_8b = { 7 }, - .errstr = "different pointers", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, - }, - { - "invalid access __sk_buff family", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, family)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff remote_ip4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip4)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff local_ip4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip4)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff remote_ip6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff local_ip6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff remote_port", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_port)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "invalid access __sk_buff remote_port", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_port)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "valid access __sk_buff family", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, family)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff remote_ip4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff local_ip4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff remote_ip6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff local_ip6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - 
offsetof(struct __sk_buff, local_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff remote_port", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, remote_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access __sk_buff remote_port", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, local_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "invalid access of tc_classid for SK_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .errstr = "invalid bpf_context access", - }, - { - "invalid access of skb->mark for SK_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .errstr = "invalid bpf_context access", - }, - { - "check skb->mark is not writeable by SK_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .errstr = "invalid bpf_context access", - }, - { - "check skb->tc_index is writeable by SK_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tc_index)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "check skb->priority is writeable by SK_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, priority)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "direct packet read for SK_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "direct packet write for SK_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "overlapping checks for direct packet access SK_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 
0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access family in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, family)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "valid access remote_ip4 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "valid access local_ip4 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "valid access remote_port in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "valid access local_port in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "valid access remote_ip6 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access local_ip6 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, - }, - { - "valid access size in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, size)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "invalid 64B read of size in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, size)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "invalid read past end of SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, size) + 4), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "invalid read offset in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, family) + 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet read for SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "direct packet write for SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "overlapping checks for direct packet access SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - }, - { - "check skb->mark is not writeable by sockets", - .insns = { - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .errstr_unpriv = "R1 leaks addr", - .result = REJECT, - }, - { - "check skb->tc_index is not writeable by sockets", - .insns = { - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, tc_index)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .errstr_unpriv = "R1 leaks addr", - .result = REJECT, - }, - { - "check cb access: byte", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 1), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 2), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 3), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1])), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1]) + 1), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1]) + 2), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1]) + 3), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2])), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2]) + 1), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2]) + 2), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2]) + 3), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3])), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3]) + 1), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3]) + 2), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3]) + 3), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4])), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) 
+ 1), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 2), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) + 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) + 2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) + 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1]) + 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1]) + 2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1]) + 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2]) + 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2]) + 2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2]) + 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3]) + 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3]) + 2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3]) + 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "__sk_buff->hash, offset 0, byte store not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, hash)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "__sk_buff->tc_index, offset 3, byte store not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tc_index) + 3), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check skb->hash byte load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash)), -#else - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 3), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check skb->hash byte load permitted 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check skb->hash byte load permitted 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check skb->hash byte load permitted 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 3), -#else - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct 
__sk_buff, hash)), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check cb access: byte, wrong type", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "check cb access: half", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 2), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1])), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1]) + 2), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2])), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2]) + 2), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3])), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3]) + 2), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4])), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 2), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) + 2), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1])), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1]) + 2), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2]) + 2), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3]) + 2), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4]) + 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check cb access: half, unaligned", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 1), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check __sk_buff->hash, offset 0, half store not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, hash)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check __sk_buff->tc_index, offset 2, half store not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tc_index) + 2), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check skb->hash half load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 2), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check skb->hash half load permitted 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, 
BPF_REG_1, - offsetof(struct __sk_buff, hash) + 2), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash)), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check skb->hash half load not permitted, unaligned 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 1), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 3), -#endif - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check skb->hash half load not permitted, unaligned 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 3), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, hash) + 1), -#endif - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "check cb access: half, wrong type", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "check cb access: word", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check cb access: word, unaligned 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0]) + 2), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: word, unaligned 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 1), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: word, unaligned 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 2), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: word, unaligned 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4]) + 3), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags =
F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: double", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check cb access: double, unaligned 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[1])), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: double, unaligned 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3])), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned context access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "check cb access: double, oob 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[4])), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check cb access: double, oob 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check __sk_buff->ifindex dw store not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, ifindex)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check __sk_buff->ifindex dw load not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, ifindex)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check cb access: double, wrong type", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "check out of range skb->cb access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0]) + 256), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .errstr_unpriv = "", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_ACT, - }, - { - "write skb fields from socket prog", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_index)), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .errstr_unpriv = "R1 leaks addr", - .result_unpriv = REJECT, - }, - { - "write skb fields from tc_cls_act prog", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, 
cb[0])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_index)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tc_index)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tstamp)), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tstamp)), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "", - .result_unpriv = REJECT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "PTR_TO_STACK store/load", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xfaceb00c, - }, - { - "PTR_TO_STACK store/load - bad alignment on off", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", - }, - { - "PTR_TO_STACK store/load - bad alignment on reg", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", - }, - { - "PTR_TO_STACK store/load - out of bounds low", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off=-79992 size=8", - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - }, - { - "PTR_TO_STACK store/load - out of bounds high", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off=0 size=8", - }, - { - "unpriv: return pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr", - .retval = POINTER_VALUE, - }, - { - "unpriv: add const to pointer", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "unpriv: add pointer to pointer", - .insns = { - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 pointer += pointer", - }, - { - "unpriv: neg pointer", - .insns = { - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer arithmetic", - }, - { - "unpriv: cmp pointer with const", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer comparison", - }, - { - "unpriv: cmp pointer with pointer", - .insns = 
{ - BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R10 pointer comparison", - }, - { - "unpriv: check that printk is disallowed", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_trace_printk), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "unknown func bpf_trace_printk#6", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: pass pointer to helper function", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R4 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: indirectly pass pointer on stack to helper function", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "invalid indirect read from stack off -8+0 size 8", - .result = REJECT, - }, - { - "unpriv: mangle pointer on stack 1", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: mangle pointer on stack 2", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: read pointer from stack in small chunks", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid size", - .result = REJECT, - }, - { - "unpriv: write pointer into ctx", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 leaks addr", - .result_unpriv = REJECT, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "unpriv: spill/fill of ctx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "unpriv: spill/fill of ctx 2", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_hash_recalc), - BPF_MOV64_IMM(BPF_REG_0, 0), - 
BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of ctx 3", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=fp expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of ctx 4", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, - BPF_REG_0, -8, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=inv expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers stx", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers stx - ctx and sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* else *target = skb; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - /* if (sk) bpf_sk_release(sk) */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "type=ctx expected=sock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers stx - leak sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, 
BPF_REG_2, 0), - /* else *target = skb; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "Unreleased reference", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers stx - sock and ctx (read)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers stx - sock and ctx (write)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) sk->mark = 42; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "cannot write into socket", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "unpriv: spill/fill of different pointers ldx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, - -(__s32)offsetof(struct bpf_perf_event_data, - sample_period) - 8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, - offsetof(struct bpf_perf_event_data, - sample_period)), - BPF_MOV64_IMM(BPF_REG_0, 0), - 
BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "unpriv: write pointer into map elem value", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "alu32: mov u32 const", - .insns = { - BPF_MOV32_IMM(BPF_REG_7, 0), - BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1), - BPF_MOV32_REG(BPF_REG_0, BPF_REG_7), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "unpriv: partial copy of pointer", - .insns = { - BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 partial copy", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: pass pointer to tail_call", - .insns = { - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .errstr_unpriv = "R3 leaks addr into helper", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: cmp map pointer with zero", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: write into frame pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, - }, - { - "unpriv: spill/fill frame pointer", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, - }, - { - "unpriv: cmp of frame pointer", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: adding of fp", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "unpriv: cmp of stack pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R2 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "runtime/jit: tail_call within bounds, prog once", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 0), - 
BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 42, - }, - { - "runtime/jit: tail_call within bounds, prog loop", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 41, - }, - { - "runtime/jit: tail_call within bounds, no prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 1, - }, - { - "runtime/jit: tail_call out of bounds", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 256), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, - }, - { - "runtime/jit: pass negative index to tail_call", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, - }, - { - "runtime/jit: pass > 32bit index to tail_call", - .insns = { - BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 2 }, - .result = ACCEPT, - .retval = 42, - /* Verifier rewrite for unpriv skips tail call here. 
*/ - .retval_unpriv = 2, - }, - { - "PTR_TO_STACK check high 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK check high 2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK check high 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK check high 4", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid stack off=0 size=1", - .result = REJECT, - }, - { - "PTR_TO_STACK check high 5", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off", - }, - { - "PTR_TO_STACK check high 6", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off", - }, - { - "PTR_TO_STACK check high 7", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "fp pointer offset", - }, - { - "PTR_TO_STACK check low 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK check low 2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), - BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK check low 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid stack off=-513 size=1", - .result = REJECT, - }, - { - "PTR_TO_STACK check low 4", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN), - 
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "math between fp pointer", - }, - { - "PTR_TO_STACK check low 5", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off", - }, - { - "PTR_TO_STACK check low 6", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack off", - }, - { - "PTR_TO_STACK check low 7", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "fp pointer offset", - }, - { - "PTR_TO_STACK mixed reg/k, 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK mixed reg/k, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, - }, - { - "PTR_TO_STACK mixed reg/k, 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -3, - }, - { - "PTR_TO_STACK reg", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid stack off=0 size=1", - .result = ACCEPT, - .retval = 42, - }, - { - "stack pointer arithmetic", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ST_MEM(0, BPF_REG_2, 4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_ST_MEM(0, BPF_REG_2, 4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "raw_stack: no skb_load_bytes", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - /* Call to skb_load_bytes() omitted. */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid read from stack off -8+0 size 8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, negative len", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, negative len 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, ~0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, zero len", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack type R3", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, no init", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, init", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, spilled regs around bounds", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - 
.result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, spilled regs corruption", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "raw_stack: skb_load_bytes, spilled regs corruption 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, - offsetof(struct __sk_buff, pkt_type)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R3 invalid mem access 'inv'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "raw_stack: skb_load_bytes, spilled regs + data", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack type R3 off=-513 access_size=8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack type R3 off=-1 access_size=8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, invalid access 6", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid stack type R3 off=-512 access_size=0", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "raw_stack: skb_load_bytes, large access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 512), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "context stores via ST", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_ST stores into R1 ctx is not allowed", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "context stores via XADD", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1, - BPF_REG_0, offsetof(struct __sk_buff, mark), 0), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_XADD stores into R1 ctx is 
not allowed", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access off=76", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - }, - { - "direct packet access: test4 (write)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test5 (pkt_end >= reg, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test6 (pkt_end >= reg, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - 
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test7 (pkt_end >= reg, both accesses)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test8 (double test, variant 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test9 (double test, variant 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test10 (write invalid)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test11 (shift, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, 
BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, - }, - { - "direct packet access: test12 (and, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, - }, - { - "direct packet access: test13 (branches, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), - BPF_MOV64_IMM(BPF_REG_3, 14), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 24), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, - }, - { - "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), - BPF_MOV64_IMM(BPF_REG_5, 12), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, - }, - { - "direct packet access: test15 (spill with xadd)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_5, 4096), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R2 invalid mem access 'inv'", - .result 
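/*
 * test11/test12 above show that a variable offset is acceptable once
 * ALU ops pin its range: a right shift or an AND mask gives the
 * verifier a bounded [min, max].  An illustrative equivalent:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pkt_var_off(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        __u64 off = 144;

        if (data + 22 > data_end)
                return 0;
        off = (off + 23) & 15;          /* AND clamps off to [0, 15] */
        return *((__u8 *)data + off);   /* max byte touched: 15 < 22 */
}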
= REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet access: test16 (arith on data_end)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test17 (pruning, alignment)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_A(-6), - }, - .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, - }, - { - "direct packet access: test18 (imm += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 8), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test19 (imm += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - BPF_MOV64_IMM(BPF_REG_4, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test20 (x += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct 
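/*
 * test16 above is the counterpart rule: arithmetic is allowed on the
 * packet pointer, never on pkt_end.  Moving data_end outward would
 * forge room the skb does not have, hence "R3 pointer arithmetic on
 * pkt_end".  A sketch of the corrected shape:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pkt_end_fixed(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;

        /* "data_end += 16;" here would be test16's rejected pattern;
         * data_end must be compared against as-is.
         */
        if (data + 8 > data_end)
                return 0;
        *(__u8 *)data = 0;
        return 0;
}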
packet access: test21 (x += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet access: test22 (x += pkt_ptr, 3)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet access: test23 (x += pkt_ptr, 4)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet access: test24 (x += pkt_ptr, 5)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), - BPF_MOV64_REG(BPF_REG_4, 
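/*
 * test20 through test24 above cover "x += pkt_ptr": an unknown scalar
 * may be added to a packet pointer provided it was masked to a known
 * range first and the sum is re-checked against data_end.  A sketch,
 * with bpf_get_prandom_u32() standing in for the spilled unknown:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int var_plus_pkt(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        __u64 off = bpf_get_prandom_u32();

        off &= 0x7fff;                  /* bound the scalar first */
        void *p = data + off;           /* scalar += pkt_ptr */
        if (p + 8 > data_end)           /* re-check the new pointer */
                return 0;
        *(__u64 *)p = 0;
        return 0;
}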
BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 64), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet access: test25 (marking on <, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test26 (marking on <, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "direct packet access: test27 (marking on <=, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, - }, - { - "direct packet access: test28 (marking on <=, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test1, valid packet_ptr range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result_unpriv = ACCEPT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - 
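/*
 * test25 through test28 above verify that range marking also works
 * for the < and <= flavours (BPF_JLT/BPF_JLE), in whichever branch
 * the relation holds.  C-level equivalent of the accepted test25:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pkt_lt(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;

        if (data + 8 < data_end)
                return *(__u8 *)data;   /* JLT marks this branch safe */
        /* test26's bug is reading on the unmarked path instead. */
        return 0;
}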
"helper access to packet: test2, unchecked packet_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "helper access to packet: test3, variable add", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "helper access to packet: test4, packet_ptr with bad range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 7 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "helper access to packet: test5, packet_ptr with too short range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "helper access to packet: test6, cls valid packet_ptr range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test7, cls unchecked packet_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, 
BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test8, cls variable add", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test9, cls packet_ptr with bad range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 7 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test10, cls packet_ptr with too short range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test11, cls unsuitable helper 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_4, 42), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_store_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "helper access to the packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test12, cls unsuitable helper 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, 
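/*
 * test11/test12 above ("unsuitable helper") pin down that only
 * helpers whitelisted for direct packet access may take a packet
 * pointer as a memory argument; bpf_skb_load_bytes() and
 * bpf_skb_store_bytes() must work through a stack buffer instead.
 * Sketch of the accepted shape:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int load_bytes_to_stack(struct __sk_buff *skb)
{
        __u8 buf[4];

        /* Stack destination: fine.  Pointing this at the packet is
         * the rejected "helper access to the packet".
         */
        if (bpf_skb_load_bytes(skb, 0, buf, sizeof(buf)) < 0)
                return 0;
        return buf[0];
}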
BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_4, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "helper access to the packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test13, cls helper ok", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test14, cls helper ok sub", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test15, cls helper fail sub", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test16, cls helper fail range 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), 
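/*
 * test13 through test16 above use bpf_csum_diff() as the whitelisted
 * helper: a packet pointer is a valid "from" buffer as long as the
 * requested length stays inside the proven range (test16 fails by
 * asking for 8 bytes where only a smaller window was checked).
 * Illustrative equivalent of the accepted test13:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pkt_csum(struct __sk_buff *skb)
{
        void *data     = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;

        if (data + 8 > data_end)
                return 0;
        /* 4 bytes of packet as "from"; a zero-length buffer is also
         * accepted, as test19 shows.  Negative sizes are rejected with
         * "R2 min value is negative" (test17/test18).
         */
        return bpf_csum_diff(data, 4, (void *)0, 0, 0);
}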
- }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test17, cls helper fail range 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, -9), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R2 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test18, cls helper fail range 3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, ~0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R2 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test19, cls helper range zero", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test20, pkt end as input", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=pkt_end expected=fp", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to packet: test21, wrong reg", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "prevent map lookup in sockmap", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_SOCK_OPS, - }, - { - "prevent map lookup in sockhash", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_sockhash = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_SOCK_OPS, - }, - { - "prevent map lookup in xskmap", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "prevent map lookup in stack trace", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_stacktrace = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "prevent map lookup in prog array", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_prog2 = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem", - }, - { - "valid map access into an array with a constant", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "valid map access into an array with a register", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - 
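/*
 * The "prevent map lookup" group above checks the opposite axis: some
 * map types refuse bpf_map_lookup_elem() outright from these program
 * types (the errstr numbers are the map_type values: sockmap 15,
 * sockhash 18, xskmap 17, stack trace 7, prog array 3).  Such maps
 * are driven through dedicated helpers instead; for a prog array that
 * is bpf_tail_call(), sketched here with an illustrative map:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("tc")
int no_direct_lookup(struct __sk_buff *skb)
{
        /* bpf_map_lookup_elem(&jmp_table, ...) would draw "cannot pass
         * map_type 3 into func bpf_map_lookup_elem" at load time.
         */
        bpf_tail_call(skb, &jmp_table, 0);
        return 0;       /* reached only if the tail call fails */
}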
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "valid map access into an array with a variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "valid map access into an array with a signed variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid map access into an array with a constant", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=48 size=8", - .result = REJECT, - }, - { - "invalid map access into an array with a register", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 min value is outside of the array range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid map access into 
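/*
 * The valid array-access tests above encode the canonical bounded
 * index: an index loaded from map memory is usable only after an
 * explicit ceiling check.  C-level sketch mirroring the selftests'
 * 48-byte struct test_val (MAX_ENTRIES is 11 there):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_ENTRIES 11

struct test_val {
        unsigned int index;
        int foo[MAX_ENTRIES];
};

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, long long);
        __type(value, struct test_val);
} map_48b SEC(".maps");

SEC("tc")
int bounded_index(struct __sk_buff *skb)
{
        long long key = 0;
        struct test_val *val = bpf_map_lookup_elem(&map_48b, &key);

        if (!val)
                return 0;
        unsigned int idx = val->index;
        if (idx >= MAX_ENTRIES)         /* the check the verifier wants */
                return 0;
        val->foo[idx] = 1;              /* idx proven in [0, MAX_ENTRIES) */
        return 0;
}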
an array with a variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid map access into an array with no floor check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid map access into an array with a invalid max check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "invalid access to map value, value_size=48 off=44 size=8", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid map access into an array with a invalid max check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3, 11 }, - .errstr = "R0 pointer += pointer", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "direct packet read 
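/*
 * The rejections above are the two classic ways the bound goes wrong:
 * "no floor check" leaves a signed index that can still be negative,
 * and an off-by-one ceiling lets offset 44 plus an 8-byte store spill
 * past value_size 48.  Sketch of the floor-check fix (illustrative
 * array map):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, long long[6]);
} arr SEC(".maps");

SEC("tc")
int floor_check(struct __sk_buff *skb)
{
        __u32 key = 0;
        long long *val = bpf_map_lookup_elem(&arr, &key);

        if (!val)
                return 0;
        long long idx = val[0];
        /* "if (idx < 6)" alone is the rejected shape: the signed
         * compare keeps negative idx alive ("R0 unbounded memory
         * access").  Bound both ends, or compare unsigned:
         */
        if (idx < 1 || idx >= 6)
                return 0;
        val[idx] = 0;
        return 0;
}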
test#1 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, pkt_type)), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, queue_mapping)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, protocol)), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, vlan_present)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid bpf_context access off=76 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "direct packet read test#2 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, vlan_tci)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, vlan_proto)), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, priority)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, priority)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, - ingress_ifindex)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, tc_index)), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, hash)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "direct packet read test#3 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, cb[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, napi_id)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5, - offsetof(struct __sk_buff, cb[1])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, cb[2])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, - offsetof(struct __sk_buff, cb[3])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8, - offsetof(struct __sk_buff, cb[4])), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "direct packet read test#4 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, family)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip4)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, local_ip4)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, 
BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, remote_port)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, local_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid access of tc_classid for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid access of data_meta for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data_meta)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid access of flow_keys for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, flow_keys)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid write access to napi_id for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, napi_id)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9, - offsetof(struct __sk_buff, napi_id)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "valid cgroup storage access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid cgroup storage access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid cgroup storage access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "fd 1 is not pointing to valid bpf_map", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid cgroup storage access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - 
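/*
 * The cgroup storage cases above reduce to two rules for
 * bpf_get_local_storage(): the flags argument must be zero, and all
 * accesses must stay inside the map's value_size.  A sketch, assuming
 * a BTF-defined cgroup storage map (names illustrative):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
        __type(key, struct bpf_cgroup_storage_key);
        __type(value, __u64);
} storage SEC(".maps");

SEC("cgroup_skb/egress")
int cg_storage(struct __sk_buff *skb)
{
        /* No NULL check needed: cgroup storage always exists for the
         * attached cgroup.  Non-zero flags or out-of-range offsets are
         * the rejected variants above.
         */
        __u64 *val = bpf_get_local_storage(&storage, 0);

        return *val & 1;
}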
BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=256 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid cgroup storage access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=-2 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid cgroup storage access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 7), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid cgroup storage access 6", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .errstr_unpriv = "R2 leaks addr into helper function", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "valid per-cpu cgroup storage access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid per-cpu cgroup storage access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid per-cpu cgroup storage access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "fd 1 is not pointing to valid bpf_map", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid per-cpu cgroup storage access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=256 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid per-cpu cgroup storage access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=-2 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid per-cpu cgroup storage access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 7), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "invalid per-cpu cgroup storage access 6", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .errstr_unpriv = "R2 leaks addr into helper function", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "write tstamp from CGROUP_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tstamp)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid bpf_context access off=152 size=8", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "read tstamp from CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tstamp)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "read gso_segs from CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, gso_segs)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "write gso_segs from CGROUP_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, gso_segs)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .result_unpriv = REJECT, - .errstr = "invalid bpf_context access off=164 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - }, - { - "read gso_segs from CLS", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, gso_segs)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - 
}, - { - "multiple registers share map_lookup_elem result", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "alu ops on ptr_to_map_value_or_null, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "alu ops on ptr_to_map_value_or_null, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "alu ops on ptr_to_map_value_or_null, 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "invalid memory access with multiple map_lookup_elem calls", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = REJECT, - .errstr = "R4 !read_ok", - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "valid indirect map_lookup_elem access with 2nd lookup in branch", - .insns = { - 
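/*
 * The map_value_or_null cases above enforce ordering: the result of
 * bpf_map_lookup_elem() may be copied freely, but no ALU may touch it
 * until a NULL check has retired the _or_null state.  Sketch:
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, long long);
        __type(value, long long);
} m SEC(".maps");

SEC("tc")
int null_check_first(struct __sk_buff *skb)
{
        long long key = 10;
        long long *val = bpf_map_lookup_elem(&m, &key);

        /* "val += 2;" here would be the rejected "R4 pointer
         * arithmetic on map_value_or_null".
         */
        if (!val)
                return 0;
        *val = 0;
        return 0;
}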
BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_2, 10), - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, - { - "invalid map access from else condition", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 unbounded memory access", - .result = REJECT, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "constant register |= constant should keep constant type", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "constant register |= constant should not bypass stack boundary checks", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack type R1 off=-48 access_size=58", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "constant register |= constant register should keep constant type", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_4, 13), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "constant register |= constant register should not bypass stack boundary checks", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_4, 24), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack type R1 off=-48 access_size=58", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "invalid direct packet write for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, 
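/*
 * The "constant |= constant" pairs above check that constant folding
 * still feeds the stack-bounds check for helper arguments: 34 | 13
 * yields 47 and fits a 48-byte buffer, while 34 | 24 yields 58 and is
 * rejected.  A sketch with bpf_probe_read() (the section name is an
 * illustrative tracepoint):
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_openat")
int stack_size_check(void *ctx)
{
        char buf[48];
        __u64 size = 34 | 13;   /* folded to 47: fits in buf */

        /* 34 | 24 == 58 would be "invalid stack type R1 off=-48
         * access_size=58".
         */
        bpf_probe_read(buf, size, ctx);
        return 0;
}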
BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "invalid direct packet write for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, - }, - { - "direct packet write for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, - }, - { - "direct packet read for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "direct packet read for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, - }, - { - "direct packet read for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, - }, - { - "overlapping checks for direct packet access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, 
BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, - }, - { - "make headroom for LWT_XMIT", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - /* split for s390 to succeed */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, - }, - { - "invalid access of tc_classid for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - }, - { - "invalid access of tc_classid for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - }, - { - "invalid access of tc_classid for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - }, - { - "leak pointer into ctx 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 2 }, - .errstr_unpriv = "R2 leaks addr into mem", - .result_unpriv = REJECT, - .result = REJECT, - .errstr = "BPF_XADD stores into R1 ctx is not allowed", - }, - { - "leak pointer into ctx 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 leaks addr into mem", - .result_unpriv = REJECT, - .result = REJECT, - .errstr = "BPF_XADD stores into R1 ctx is not allowed", - }, - { - "leak pointer into ctx 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .errstr_unpriv = "R2 leaks addr into ctx", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "leak pointer into map val", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr_unpriv = "R6 leaks addr into mem", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "helper access to map: full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=0", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=56", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: negative range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=0", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=52", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): negative range (> adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const imm): negative range (< adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the array range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=52", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): negative range (> adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via const reg): negative range (< adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via variable): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via variable): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via variable): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct test_val, foo), 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the array range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via variable): no max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - 
BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 unbounded memory access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to adjusted map (via variable): wrong max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo) + 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=45", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using <, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using <, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 unbounded memory access", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using <=, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using <=, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, 
BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 unbounded memory access", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<, good access 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 min value is negative", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<=, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - 
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<=, good access 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to map: bounds check using s<=, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 min value is negative", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map access: known scalar += value_ptr from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps", - .retval = 1, - }, - { - "map access: value_ptr -= known scalar from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - 
.result_unpriv = REJECT, - .errstr_unpriv = "R0 min value is outside of the array range", - .retval = 1, - }, - { - "map access: known scalar += value_ptr from different maps, but same value properties", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: mixing value pointer and scalar, 1", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_A(2), - // branch B - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // fake-dead code; targeted from branch A to - // prevent dead code sanitization - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R2 tried to add from different pointers or scalars", - .retval = 0, - }, - { - "map access: mixing value pointer and scalar, 2", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - // branch B - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // 
fake-dead code; targeted from branch A to - // prevent dead code sanitization - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R2 tried to add from different maps or paths", - .retval = 0, - }, - { - "sanitation: alu with different scalars", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0x100001), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .retval = 0x100000, - }, - { - "map access: value_ptr += known scalar, upper oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr += known scalar, upper oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr += known scalar, upper oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr -= known scalar, lower oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the array range", - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - }, - { - "map access: value_ptr -= known scalar, lower oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr -= known scalar, lower oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: known scalar += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: value_ptr += known scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: value_ptr += 
known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - }, - { - "map access: value_ptr += known scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - }, - { - "map access: value_ptr += known scalar, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr += known scalar, 5", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - }, - { - "map access: value_ptr += known scalar, 6", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - }, - { - "map access: unknown scalar += value_ptr, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
- BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: unknown scalar += value_ptr, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - }, - { - "map access: unknown scalar += value_ptr, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 0xabcdef12, - }, - { - "map access: unknown scalar += value_ptr, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_1, 19), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 max value is outside of the array range", - .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", - }, - { - "map access: value_ptr += unknown scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: value_ptr += unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - }, - { - "map access: value_ptr += unknown scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1), - BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, - }, - { - "map access: value_ptr += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 pointer += pointer prohibited", - }, - { - "map access: known scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 tried to subtract pointer from scalar", - }, - { - "map access: value_ptr -= known scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the array range", - }, - { - "map access: value_ptr -= known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_MOV64_IMM(BPF_REG_2, 4), - 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: unknown scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 tried to subtract pointer from scalar", - }, - { - "map access: value_ptr -= unknown scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is negative", - }, - { - "map access: value_ptr -= unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, - }, - { - "map access: value_ptr -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", - .errstr_unpriv = "R0 pointer -= pointer prohibited", - }, - { - "map lookup helper access to map", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 
4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 8 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map update helper access to map", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_update_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map update helper access to map: wrong size", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_update_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .fixup_map_hash_16b = { 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=8 off=0 size=16", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via const imm)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, - offsetof(struct other_val, bar)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via const imm): out-of-bound 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, - sizeof(struct other_val) - 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=12 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via const imm): out-of-bound 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=-4 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper 
access to adjusted map (via const reg)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - offsetof(struct other_val, bar)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via const reg): out-of-bound 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, - sizeof(struct other_val) - 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=12 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via const reg): out-of-bound 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, -4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=-4 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via variable)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct other_val, bar), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map helper access to adjusted map (via variable): no max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, 
- { - "map helper access to adjusted map (via variable): wrong max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct other_val, bar) + 1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 11 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=9 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "map element value is preserved across register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - }, - { - "map element value or null is marked on register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - }, - { - "map element value store of cleared call register", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R1 !read_ok", - .errstr = "R1 !read_ok", - .result = REJECT, - .result_unpriv = REJECT, - }, - { - "map element value with unaligned store", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), - BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), - BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), - BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), - BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), - BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "map element value with unaligned load", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "map element value illegal alu op, 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 bitwise operator &= on pointer", - .result = REJECT, - }, - { - "map element value illegal alu op, 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 32-bit pointer arithmetic prohibited", - .result = REJECT, - }, - { - "map element value illegal alu op, 3", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 pointer arithmetic with /= operator", - .result = REJECT, - }, - { - "map element value illegal alu op, 4", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "invalid mem access 'inv'", - .result = REJECT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "map element value illegal alu op, 5", - .insns = { - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_3, 4096), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 invalid mem access 'inv'", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "map element value is preserved across register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, - offsetof(struct test_val, foo)), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, bitwise AND, zero included", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect read from stack off -64+0 size 64", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, bitwise AND + JMP, wrong max", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), - BPF_MOV64_IMM(BPF_REG_4, 0), - 
BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack type R1 off=-64 access_size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP (signed), correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP, bounds + offset", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack type R1 off=-64 access_size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP, wrong max", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - 
BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid stack type R1 off=-64 access_size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP, no max check", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - /* because max wasn't checked, signed min is negative */ - .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP, no min check", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect read from stack off -64+0 size 64", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: stack, JMP (signed), no min check", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: map, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, - sizeof(struct test_val), 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: map, JMP, wrong max", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, - 
sizeof(struct test_val) + 1, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=49", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: map adjusted, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, - sizeof(struct test_val) - 20, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: map adjusted, JMP, wrong max", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, - sizeof(struct test_val) - 19, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the array range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=inv expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, 
BPF_REG_1, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 0 /* csum_diff of 64-byte packet */, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "helper access to variable memory: size = 0 not 
allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=inv expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=inv expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: 8 bytes leak", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - 
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect read from stack off -64+32 size 64", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "helper access to variable memory: 8 bytes no leak (init memory)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "invalid and of negative number", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the array range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "invalid range check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_9, 1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), - BPF_MOV32_IMM(BPF_REG_3, 1), - BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the array range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "map in map access", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .result = ACCEPT, - }, - { - "invalid inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 pointer arithmetic on map_ptr prohibited", - .result = REJECT, - }, - { - "forgot null checking on the inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 type=map_value_or_null expected=map_ptr", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r1", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R1 !read_ok", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R2 !read_ok", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r3", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .errstr = "R3 !read_ok", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r4", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_EXIT_INSN(), - }, - .errstr = "R4 !read_ok", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r5", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .errstr = "R5 !read_ok", - .result = REJECT, - }, - { - "ld_abs: check calling conv, r7", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_7, 0), - BPF_LD_ABS(BPF_W, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "ld_abs: tests on r6 and skb data reload helper", - .insns = { - BPF_MOV64_REG(BPF_REG_6, 
BPF_REG_1), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_vlan_push), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 42 /* ultimate return value */, - }, - { - "ld_ind: check calling conv, r1", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R1 !read_ok", - .result = REJECT, - }, - { - "ld_ind: check calling conv, r2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R2 !read_ok", - .result = REJECT, - }, - { - "ld_ind: check calling conv, r3", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .errstr = "R3 !read_ok", - .result = REJECT, - }, - { - "ld_ind: check calling conv, r4", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_EXIT_INSN(), - }, - .errstr = "R4 !read_ok", - .result = REJECT, - }, - { - "ld_ind: check calling conv, r5", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_5, 1), - BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .errstr = "R5 !read_ok", - .result = REJECT, - }, - { - "ld_ind: check calling conv, r7", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 1, - }, - { - "check bpf_perf_event_data->sample_period byte load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), -#else - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period) + 7), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "check bpf_perf_event_data->sample_period half load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period) + 6), -#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "check bpf_perf_event_data->sample_period word load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), -#else - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period) + 4), 
-#endif - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "check bpf_perf_event_data->sample_period dword load permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, - }, - { - "check skb->data half load not permitted", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data) + 2), -#endif - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - }, - { - "check skb->tc_classid half load not permitted for lwt prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER == __LITTLE_ENDIAN - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid) + 2), -#endif - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "bounds checks mixing signed and unsigned, positive bounds", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 2), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_ALU64_REG(BPF_ADD, 
BPF_REG_8, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - }, - { - "bounds checks mixing signed and unsigned, variant 5", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 6", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_6, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - 
}, - .errstr = "R4 min value is negative, either use unsigned", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 7", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - }, - { - "bounds checks mixing signed and unsigned, variant 8", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 9", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - }, - { - "bounds checks mixing signed and unsigned, variant 10", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, 
variant 11", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - /* Dead branch. */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 12", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -6), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 13", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 2), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 14", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_8, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6), - BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3), - BPF_JMP_IMM(BPF_JA, 0, 0, -7), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 15", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -6), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "unbounded min value", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - .result_unpriv = REJECT, - }, - { - "subtraction bounds (map value) variant 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the array range", - .result = REJECT, - }, - { - "subtraction bounds (map value) variant 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, - }, - { - "check subtraction on pointers for unpriv", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - 
BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1, 9 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R9 pointer -= pointer prohibited", - }, - { - "bounds check based on zero-extended MOV", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0x0000'0000'ffff'ffff */ - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT - }, - { - "bounds check based on sign-extended MOV. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* r0 = <oob pointer> */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 4294967295", - .result = REJECT - }, - { - "bounds check based on sign-extended MOV. test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xfff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), - /* r0 = <oob pointer> */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is outside of the array range", - .result = REJECT - }, - { - "bounds check based on reg_off + var_off + insn_off. 
test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value_size=8 off=1073741825", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "bounds check based on reg_off + var_off + insn_off. test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value 1073741823", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "bounds check after truncation of non-boundary-crossing range", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - /* r2 = 0x10'0000'0000 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), - /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = [0x00, 0xff] */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT - }, - { - "bounds check after truncation of boundary-crossing range (1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 
0xffff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = 0 or - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* no-op or OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - /* not actually fully unbounded, but the bound is very high */ - .errstr = "R0 unbounded memory access", - .result = REJECT - }, - { - "bounds check after truncation of boundary-crossing range (2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - * difference to previous test: truncation via MOV32 - * instead of ALU32. - */ - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = 0 or - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* no-op or OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - /* not actually fully unbounded, but the bound is very high */ - .errstr = "R0 unbounded memory access", - .result = REJECT - }, - { - "bounds check after wrapping 32-bit addition", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - /* r1 = 0x7fff'ffff */ - BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), - /* r1 = 0xffff'fffe */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT - }, - { - "bounds check after shift with oversized count operand", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_2, 32), - BPF_MOV64_IMM(BPF_REG_1, 1), - /* r1 = (u32)1 << (u32)32 = ? 
*/ - BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), - /* r1 = [0x0000, 0xffff] */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the array range", - .result = REJECT - }, - { - "bounds check after right shift of maybe-negative number", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - /* r1 = [-0x01, 0xfe] */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - /* r1 = 0 or 0xff'ffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* r1 = 0 or 0xffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 unbounded memory access", - .result = REJECT - }, - { - "bounds check after 32-bit right shift with 64-bit input", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = 2 */ - BPF_MOV64_IMM(BPF_REG_1, 2), - /* r1 = 1<<32 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31), - /* r1 = 0 (NOT 2!) */ - BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), - /* r1 = 0xffff'fffe (NOT 0!) */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), - /* computes OOB pointer */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 invalid mem access", - .result = REJECT, - }, - { - "bounds check map access with off+size signed 32bit overflow. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 2147483646", - .result = REJECT - }, - { - "bounds check map access with off+size signed 32bit overflow. 
test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset 1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT - }, - { - "bounds check map access with off+size signed 32bit overflow. test3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset -1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT - }, - { - "bounds check map access with off+size signed 32bit overflow. test4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 1000000), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 1000000000000", - .result = REJECT - }, - { - "pointer/scalar confusion in state equality check (way 1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" - }, - { - "pointer/scalar confusion in state equality check (way 2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" - }, - { - "variable-offset ctx access", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - 
/* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - /* add it to skb. We now have either &skb->len or - * &skb->pkt_type, but we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - /* dereference it */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "variable ctx access var_off=(0x0; 0x4)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "variable-offset stack access", - .insns = { - /* Fill the top 8 bytes of the stack */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), - /* add it to fp. We now have either fp-4 or fp-8, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_EXIT_INSN(), - }, - .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "indirect variable-offset stack access", - .insns = { - /* Fill the top 8 bytes of the stack */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), - /* add it to fp. We now have either fp-4 or fp-8, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it indirectly */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "variable stack read R2", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "direct stack access with 32-bit wraparound. test1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN() - }, - .errstr = "fp pointer and 2147483647", - .result = REJECT - }, - { - "direct stack access with 32-bit wraparound. test2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN() - }, - .errstr = "fp pointer and 1073741823", - .result = REJECT - }, - { - "direct stack access with 32-bit wraparound. 
test3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN() - }, - .errstr = "fp pointer offset 1073741822", - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result = REJECT - }, - { - "liveness pruning and write screening", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* branch conditions teach us nothing about R2 */ - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, - }, - { - "varlen_map_value_access pruning", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, { "invalid 64-bit BPF_END", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c new file mode 100644 index 000000000000..e0fad1548737 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/and.c @@ -0,0 +1,50 @@ +{ + "invalid and of negative number", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 max value is outside of the array range", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid range check", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_9, 1), + BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), + BPF_MOV32_IMM(BPF_REG_3, 1), + BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), + BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + 
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 max value is outside of the array range", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c new file mode 100644 index 000000000000..0dcecaf3ec6f --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/array_access.c @@ -0,0 +1,219 @@ +{ + "valid map access into an array with a constant", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "valid map access into an array with a register", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "valid map access into an array with a variable", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "valid map access into an array with a signed variable", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), + BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid map access into an array with a constant", + .insns = 
{ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, + offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=48 size=8", + .result = REJECT, +}, +{ + "invalid map access into an array with a register", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 min value is outside of the array range", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid map access into an array with a variable", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid map access into an array with no floor check", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), + BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .errstr = "R0 unbounded memory access", + .result_unpriv = REJECT, + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid map access into an array with a invalid max check", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, 
foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .errstr = "invalid access to map value, value_size=48 off=44 size=8", + .result_unpriv = REJECT, + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid map access into an array with a invalid max check", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3, 11 }, + .errstr = "R0 pointer += pointer", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c new file mode 100644 index 000000000000..b8d18642653a --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/basic.c @@ -0,0 +1,23 @@ +{ + "empty prog", + .insns = { + }, + .errstr = "unknown opcode 00", + .result = REJECT, +}, +{ + "only exit insn", + .insns = { + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, +}, +{ + "no bpf_exit", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), + }, + .errstr = "not an exit", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/basic_call.c b/tools/testing/selftests/bpf/verifier/basic_call.c new file mode 100644 index 000000000000..a8c6ab4c1622 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/basic_call.c @@ -0,0 +1,50 @@ +{ + "invalid call insn1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown opcode 8d", + .result = REJECT, +}, +{ + "invalid call insn2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_CALL uses reserved", + .result = REJECT, +}, +{ + "invalid function call", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567), + BPF_EXIT_INSN(), + }, + .errstr = "invalid func unknown#1234567", + .result = REJECT, +}, +{ + "invalid argument register", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "non-invalid argument register", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), + BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c new file mode 100644 index 000000000000..a18686a4bd75 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/basic_instr.c @@ -0,0 +1,92 @@ +{ + 
"add+sub+mul", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_2, 3), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = -3, +}, +{ + "xor32 zero extend check", + .insns = { + BPF_MOV32_IMM(BPF_REG_2, -1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32), + BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff), + BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2), + BPF_MOV32_IMM(BPF_REG_0, 2), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "arsh32 on imm", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "arsh32 on imm 2", + .insns = { + BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788), + BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = -16069393, +}, +{ + "arsh32 on reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "arsh32 on reg 2", + .insns = { + BPF_LD_IMM64(BPF_REG_0, 0xffff55667788), + BPF_MOV64_IMM(BPF_REG_1, 15), + BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 43724, +}, +{ + "arsh64 on imm", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "arsh64 on reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c new file mode 100644 index 000000000000..b56f8117c09d --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/basic_stack.c @@ -0,0 +1,64 @@ +{ + "stack out of bounds", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack", + .result = REJECT, +}, +{ + "uninitialized stack1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 2 }, + .errstr = "invalid indirect read from stack", + .result = REJECT, +}, +{ + "uninitialized stack2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), + BPF_EXIT_INSN(), + }, + .errstr = "invalid read from stack", + .result = REJECT, +}, +{ + "invalid fp arithmetic", + /* If this gets ever changed, make sure JITs can deal with it. 
*/ + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 subtraction from stack pointer", + .result = REJECT, +}, +{ + "non-invalid fp arithmetic", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "misaligned read from stack", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned stack access", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c new file mode 100644 index 000000000000..7a0aab3f2cd2 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/basic_stx_ldx.c @@ -0,0 +1,45 @@ +{ + "invalid src register in STX", + .insns = { + BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R15 is invalid", + .result = REJECT, +}, +{ + "invalid dst register in STX", + .insns = { + BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R14 is invalid", + .result = REJECT, +}, +{ + "invalid dst register in ST", + .insns = { + BPF_ST_MEM(BPF_B, 14, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R14 is invalid", + .result = REJECT, +}, +{ + "invalid src register in LDX", + .insns = { + BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R12 is invalid", + .result = REJECT, +}, +{ + "invalid dst register in LDX", + .insns = { + BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R11 is invalid", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c new file mode 100644 index 000000000000..d55f476f2237 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -0,0 +1,508 @@ +{ + "subtraction bounds (map value) variant 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), + BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 max value is outside of the array range", + .result = REJECT, +}, +{ + "subtraction bounds (map value) variant 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), + BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + 
BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "check subtraction on pointers for unpriv", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP), + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1, 9 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R9 pointer -= pointer prohibited", +}, +{ + "bounds check based on zero-extended MOV", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0x0000'0000'ffff'ffff */ + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0 */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT +}, +{ + "bounds check based on sign-extended MOV. test1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0xffff'ffff'ffff'ffff */ + BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0xffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), + /* r0 = <oob pointer> */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access to OOB pointer */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "map_value pointer and 4294967295", + .result = REJECT +}, +{ + "bounds check based on sign-extended MOV. 
test2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + /* r2 = 0xffff'ffff'ffff'ffff */ + BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), + /* r2 = 0xfff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), + /* r0 = */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + /* access to OOB pointer */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 min value is outside of the array range", + .result = REJECT +}, +{ + "bounds check based on reg_off + var_off + insn_off. test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "value_size=8 off=1073741825", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "bounds check based on reg_off + var_off + insn_off. test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "value 1073741823", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "bounds check after truncation of non-boundary-crossing range", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + /* r2 = 0x10'0000'0000 */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), + /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + /* r1 = [0x00, 0xff] */ + BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), + /* r1 = 0 */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT +}, +{ + "bounds 
check after truncation of boundary-crossing range (1)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0xffff'ffff] or + * [0x0000'0000, 0x0000'007f] + */ + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0x00, 0xff] or + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = 0 or + * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op or OOB pointer computation */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + /* not actually fully unbounded, but the bound is very high */ + .errstr = "R0 unbounded memory access", + .result = REJECT +}, +{ + "bounds check after truncation of boundary-crossing range (2)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0xffff'ff80, 0xffff'ffff] or + * [0x0000'0000, 0x0000'007f] + * difference to previous test: truncation via MOV32 + * instead of ALU32. 
+ */ + BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = [0x00, 0xff] or + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), + /* r1 = 0 or + * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] + */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* no-op or OOB pointer computation */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + /* not actually fully unbounded, but the bound is very high */ + .errstr = "R0 unbounded memory access", + .result = REJECT +}, +{ + "bounds check after wrapping 32-bit addition", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + /* r1 = 0x7fff'ffff */ + BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), + /* r1 = 0xffff'fffe */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + /* r1 = 0 */ + BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), + /* no-op */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* access at offset 0 */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT +}, +{ + "bounds check after shift with oversized count operand", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_IMM(BPF_REG_2, 32), + BPF_MOV64_IMM(BPF_REG_1, 1), + /* r1 = (u32)1 << (u32)32 = ? 
*/ + BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), + /* r1 = [0x0000, 0xffff] */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), + /* computes unknown pointer, potentially OOB */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 max value is outside of the array range", + .result = REJECT +}, +{ + "bounds check after right shift of maybe-negative number", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + /* r1 = [0x00, 0xff] */ + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + /* r1 = [-0x01, 0xfe] */ + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), + /* r1 = 0 or 0xff'ffff'ffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* r1 = 0 or 0xffff'ffff'ffff */ + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), + /* computes unknown pointer, potentially OOB */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* potentially OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 unbounded memory access", + .result = REJECT +}, +{ + "bounds check after 32-bit right shift with 64-bit input", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + /* r1 = 2 */ + BPF_MOV64_IMM(BPF_REG_1, 2), + /* r1 = 1<<32 */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31), + /* r1 = 0 (NOT 2!) */ + BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), + /* r1 = 0xffff'fffe (NOT 0!) */ + BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), + /* computes OOB pointer */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + /* OOB access */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + /* exit */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 invalid mem access", + .result = REJECT, +}, +{ + "bounds check map access with off+size signed 32bit overflow. test1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "map_value pointer and 2147483646", + .result = REJECT +}, +{ + "bounds check map access with off+size signed 32bit overflow. 
test2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "pointer offset 1073741822", + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .result = REJECT +}, +{ + "bounds check map access with off+size signed 32bit overflow. test3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "pointer offset -1073741822", + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .result = REJECT +}, +{ + "bounds check map access with off+size signed 32bit overflow. test4", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_1, 1000000), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "map_value pointer and 1000000000000", + .result = REJECT +}, diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c new file mode 100644 index 000000000000..9baca7a75c42 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c @@ -0,0 +1,406 @@ +{ + "bounds checks mixing signed and unsigned, positive bounds", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, 2), + BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), + BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), + BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 4", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, +}, +{ + "bounds checks mixing signed and unsigned, variant 5", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + 
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_6, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R4 min value is negative, either use unsigned", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 7", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, +}, +{ + "bounds checks mixing signed and unsigned, variant 8", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 9", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + 
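+ /* Here R1 is provably in [0, 1]: the unsigned JGT against 1ULL << 63 rules out negative values and the signed JSGT fall-through caps it at 1, so the byte store below is in bounds and the program is accepted. */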
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, +}, +{ + "bounds checks mixing signed and unsigned, variant 10", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 11", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), + /* Dead branch. */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 12", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -6), + BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 13", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, 2), + BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + 
BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 14", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_MOV64_IMM(BPF_REG_8, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6), + BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3), + BPF_JMP_IMM(BPF_JA, 0, 0, -7), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, +}, +{ + "bounds checks mixing signed and unsigned, variant 15", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_MOV64_IMM(BPF_REG_2, -6), + BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "unbounded min value", + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + .result_unpriv = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c new file mode 100644 index 000000000000..349c0862fb4c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/cfg.c @@ -0,0 +1,70 @@ +{ + "unreachable", + .insns = { + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .errstr = "unreachable", + .result = REJECT, +}, +{ + "unreachable2", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unreachable", + .result = REJECT, +}, +{ + "out of range jump", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "out of range jump2", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, -2), + BPF_EXIT_INSN(), + }, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "loop (back-edge)", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, -1), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, +}, +{ + "loop2 (back-edge)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + 
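+ /* Insns 0-2 are plain moves; the JA with offset -4 below jumps back to insn 0, and the verifier's CFG walk rejects any such back-edge. */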
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, +}, +{ + "conditional loop", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), + BPF_EXIT_INSN(), + }, + .errstr = "back-edge", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c new file mode 100644 index 000000000000..52e4c03b076b --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/cgroup_skb.c @@ -0,0 +1,197 @@ +{ + "direct packet read test#1 for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, queue_mapping)), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, protocol)), + BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, + offsetof(struct __sk_buff, vlan_present)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "invalid bpf_context access off=76 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "direct packet read test#2 for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, vlan_tci)), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, vlan_proto)), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, priority)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, + offsetof(struct __sk_buff, priority)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, ingress_ifindex)), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, + offsetof(struct __sk_buff, hash)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "direct packet read test#3 for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, + offsetof(struct __sk_buff, napi_id)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, + offsetof(struct __sk_buff, cb[3])), + 
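+ /* cb[0]..cb[4] stay both readable and writable for cgroup/skb programs, while napi_id above is load-only. */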
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8, + offsetof(struct __sk_buff, cb[4])), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "direct packet read test#4 for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, family)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip4)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, local_ip4)), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[3])), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[3])), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, remote_port)), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, local_port)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid access of tc_classid for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid access of data_meta for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data_meta)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid access of flow_keys for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, flow_keys)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid write access to napi_id for CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, + offsetof(struct __sk_buff, napi_id)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9, + offsetof(struct __sk_buff, napi_id)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "write tstamp from CGROUP_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "invalid bpf_context access off=152 size=8", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "read tstamp from CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tstamp)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c 
b/tools/testing/selftests/bpf/verifier/cgroup_storage.c new file mode 100644 index 000000000000..97057c0a1b8a --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/cgroup_storage.c @@ -0,0 +1,220 @@ +{ + "valid cgroup storage access", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid cgroup storage access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .result = REJECT, + .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid cgroup storage access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "fd 1 is not pointing to valid bpf_map", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid cgroup storage access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=256 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid cgroup storage access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=-2 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid cgroup storage access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 7), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid cgroup storage access 6", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .errstr_unpriv = 
"R2 leaks addr into helper function", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "valid per-cpu cgroup storage access", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid per-cpu cgroup storage access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .result = REJECT, + .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid per-cpu cgroup storage access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "fd 1 is not pointing to valid bpf_map", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid per-cpu cgroup storage access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=256 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid per-cpu cgroup storage access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_EXIT_INSN(), + }, + .fixup_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=64 off=-2 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "invalid per-cpu cgroup storage access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 7), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "invalid per-cpu cgroup storage access 6", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_percpu_cgroup_storage = { 1 }, + .result = REJECT, + .errstr = "get_local_storage() doesn't support non-zero flags", + .errstr_unpriv = "R2 leaks addr into helper function", 
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c new file mode 100644 index 000000000000..84446dfc7c1d --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/const_or.c @@ -0,0 +1,60 @@ +{ + "constant register |= constant should keep constant type", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "constant register |= constant should not bypass stack boundary checks", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-48 access_size=58", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "constant register |= constant register should keep constant type", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_MOV64_IMM(BPF_REG_4, 13), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "constant register |= constant register should not bypass stack boundary checks", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_MOV64_IMM(BPF_REG_4, 24), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-48 access_size=58", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c new file mode 100644 index 000000000000..0d623af34471 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx.c @@ -0,0 +1,23 @@ +{ + "context stores via ST", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_ST stores into R1 ctx is not allowed", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "context stores via XADD", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1, + BPF_REG_0, offsetof(struct __sk_buff, mark), 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_XADD stores into R1 ctx is not allowed", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c new file mode 100644 index 000000000000..b0195770da6a --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c @@ -0,0 +1,180 @@ +{ + "valid access family in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, family)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "valid access remote_ip4 in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, 
remote_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "valid access local_ip4 in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "valid access remote_port in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, remote_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "valid access local_port in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "valid access remote_ip6 in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, remote_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, remote_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, remote_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, remote_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access local_ip6 in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, local_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access size in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct sk_msg_md, size)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "invalid 64B read of size in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, size)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "invalid read past end of SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, size) + 4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "invalid read offset in SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, family) + 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_MSG, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet read for SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "direct packet write for SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + 
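+ /* Usual direct-packet pattern: advance a copy of data by the access size (8 bytes) and compare against data_end before the store below. */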
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, +{ + "overlapping checks for direct packet access SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, +}, diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c new file mode 100644 index 000000000000..f32e4da991dd --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c @@ -0,0 +1,1001 @@ +{ + "access skb fields ok", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, queue_mapping)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, protocol)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, vlan_present)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, vlan_tci)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, napi_id)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "access skb fields bad1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "access skb fields bad2", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "different pointers", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, +{ + "access skb fields bad3", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, pkt_type)), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, 
BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JA, 0, 0, -12), + }, + .fixup_map_hash_8b = { 6 }, + .errstr = "different pointers", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, +{ + "access skb fields bad4", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JA, 0, 0, -13), + }, + .fixup_map_hash_8b = { 7 }, + .errstr = "different pointers", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, +{ + "invalid access __sk_buff family", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, family)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff remote_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip4)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff local_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip4)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff remote_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff local_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_port)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "invalid access __sk_buff local_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_port)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "valid access __sk_buff family", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, family)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff remote_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff local_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff remote_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff,
remote_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff local_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "valid access __sk_buff local_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "invalid access of tc_classid for SK_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + .errstr = "invalid bpf_context access", +}, +{ + "invalid access of skb->mark for SK_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + .errstr = "invalid bpf_context access", +}, +{ + "check skb->mark is not writeable by SK_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + .errstr = "invalid bpf_context access", +}, +{ + "check skb->tc_index is writeable by SK_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "check skb->priority is writeable by SK_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, priority)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "direct packet read for SK_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "direct packet write for SK_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "overlapping checks for
direct packet access SK_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, +}, +{ + "check skb->mark is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .errstr_unpriv = "R1 leaks addr", + .result = REJECT, +}, +{ + "check skb->tc_index is not writeable by sockets", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .errstr_unpriv = "R1 leaks addr", + .result = REJECT, +}, +{ + "check cb access: byte", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 3), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 1), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, 
BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "__sk_buff->hash, offset 0, byte store not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, hash)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "__sk_buff->tc_index, offset 3, byte store not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index) + 3), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check skb->hash byte load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash)), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 3), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check skb->hash byte load permitted 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check skb->hash byte load permitted 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check skb->hash byte load permitted 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 3), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash)), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check cb access: byte, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "check cb access: half", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_STX_MEM(BPF_H, 
BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3]) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check cb access: half, unaligned", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 1), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check __sk_buff->hash, offset 0, half store not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, hash)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check __sk_buff->tc_index, offset 2, half store not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check skb->hash half load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check skb->hash half load permitted 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash)), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check skb->hash half load not permitted, unaligned 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 1), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 3), +#endif + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check skb->hash half load not permitted, unaligned 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct 
__sk_buff, hash) + 3), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 1), +#endif + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "check cb access: half, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "check cb access: word", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check cb access: word, unaligned 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0]) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: word, unaligned 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 1), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: word, unaligned 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 2), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: word, unaligned 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4]) + 3), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: double", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[2])), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check cb access: double, unaligned 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[1])), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: double, unaligned 2", + .insns =
{ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_EXIT_INSN(), + }, + .errstr = "misaligned context access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "check cb access: double, oob 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check cb access: double, oob 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check __sk_buff->ifindex dw store not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, ifindex)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check __sk_buff->ifindex dw load not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, ifindex)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check cb access: double, wrong type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "check out of range skb->cb access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0]) + 256), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .errstr_unpriv = "", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_ACT, +}, +{ + "write skb fields from socket prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[4])), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, cb[2])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .errstr_unpriv = "R1 leaks addr", + .result_unpriv = REJECT, +}, +{ + "write skb fields from tc_cls_act prog", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tc_index)), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[3])), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tstamp)), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, tstamp)), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "", + .result_unpriv = REJECT, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "check skb->data half load not permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data)), +#else + 
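+ /* Big-endian counterpart: the low half of the 32-bit data field sits at offset +2. */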
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data) + 2), +#endif + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", +}, +{ + "read gso_segs from CGROUP_SKB", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "write gso_segs from CGROUP_SKB", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .result_unpriv = REJECT, + .errstr = "invalid bpf_context access off=164 size=4", + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, +}, +{ + "read gso_segs from CLS", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, gso_segs)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c new file mode 100644 index 000000000000..650ae00a2ed5 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c @@ -0,0 +1,619 @@ +{ + "direct packet access: test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), + BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access off=76", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, +}, +{ + "direct packet access: test4 (write)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + 
offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test5 (pkt_end >= reg, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test6 (pkt_end >= reg, bad access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test7 (pkt_end >= reg, both accesses)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test8 (double test, variant 1)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test9 (double test, variant 2)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test10 (write invalid)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test11 (shift, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 144), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 1, +}, +{ + "direct packet access: test12 (and, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_3, 144), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 1, +}, +{ + "direct packet access: test13 (branches, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), + BPF_MOV64_IMM(BPF_REG_3, 14), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 24), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), + BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 1, +}, +{ + "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_0, 22), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), + BPF_MOV64_IMM(BPF_REG_5, 12), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 1, +}, +{ + "direct packet access: test15 (spill with xadd)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), + BPF_MOV64_IMM(BPF_REG_5, 4096), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), + BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 invalid mem access 'inv'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test16 (arith on data_end)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R3 pointer arithmetic on pkt_end", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test17 (pruning, alignment)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), + BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_A(-6), + }, + .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "direct packet access: test18 (imm += pkt_ptr, 1)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_IMM(BPF_REG_0, 8), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test19 (imm += pkt_ptr, 2)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + 
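+	/* Reversed operand order below: adding a packet pointer to a
+	 * small constant still yields a packet pointer whose offset
+	 * the verifier can track against the checked bound.
+	 */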
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + BPF_MOV64_IMM(BPF_REG_4, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), + BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test20 (x += pkt_ptr, 1)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test21 (x += pkt_ptr, 2)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), + BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test22 (x += pkt_ptr, 3)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), + BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test23 (x += pkt_ptr, 4)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, 
data_end)), + BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 31), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test24 (x += pkt_ptr, 5)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 64), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "direct packet access: test25 (marking on <, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test26 (marking on <, bad access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "direct packet access: test27 (marking on <=, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 1, +}, +{ + "direct packet access: test28 (marking on <=, bad access)", + .insns = { + BPF_LDX_MEM(BPF_W, 
BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c new file mode 100644 index 000000000000..698e3779fdd2 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c @@ -0,0 +1,40 @@ +{ + "direct stack access with 32-bit wraparound. test1", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "fp pointer and 2147483647", + .result = REJECT +}, +{ + "direct stack access with 32-bit wraparound. test2", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "fp pointer and 1073741823", + .result = REJECT +}, +{ + "direct stack access with 32-bit wraparound. test3", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "fp pointer offset 1073741822", + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result = REJECT +}, diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c new file mode 100644 index 000000000000..7685edfbcf71 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/div0.c @@ -0,0 +1,184 @@ +{ + "DIV32 by 0, zero check 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "DIV32 by 0, zero check 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "DIV64 by 0, zero check", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "MOD32 by 0, zero check 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "MOD32 by 0, zero check 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, 
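+/* eBPF defines division and modulo by zero at runtime: BPF_DIV by zero
+ * zeroes the destination register, BPF_MOD by zero leaves it unchanged.
+ * All of these programs must therefore verify and run to completion,
+ * with the .retval fields encoding those semantics.
+ */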
+{ + "MOD64 by 0, zero check", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 1), + BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "DIV32 by 0, zero check ok, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 2), + BPF_MOV32_IMM(BPF_REG_2, 16), + BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 8, +}, +{ + "DIV32 by 0, zero check 1, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "DIV32 by 0, zero check 2, cls", + .insns = { + BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "DIV64 by 0, zero check, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "MOD32 by 0, zero check ok, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_MOV32_IMM(BPF_REG_1, 3), + BPF_MOV32_IMM(BPF_REG_2, 5), + BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 2, +}, +{ + "MOD32 by 0, zero check 1, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "MOD32 by 0, zero check 2, cls", + .insns = { + BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "MOD64 by 0, zero check 1, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_0, 2), + BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 2, +}, +{ + "MOD64 by 0, zero check 2, cls", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_0, -1), + BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = -1, +}, diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c new file mode 100644 index 000000000000..bd3f38dbe796 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/div_overflow.c @@ -0,0 +1,104 @@ +/* Just make sure that JITs used udiv/umod as otherwise we get + * an exception from INT_MIN/-1 overflow similarly as with div + * by zero. 
+ */ +{ + "DIV32 overflow, check 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_0, INT_MIN), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "DIV32 overflow, check 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, INT_MIN), + BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "DIV64 overflow, check 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), + BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "DIV64 overflow, check 2", + .insns = { + BPF_LD_IMM64(BPF_REG_0, LLONG_MIN), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, -1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "MOD32 overflow, check 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_0, INT_MIN), + BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = INT_MIN, +}, +{ + "MOD32 overflow, check 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, INT_MIN), + BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = INT_MIN, +}, +{ + "MOD64 overflow, check 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "MOD64 overflow, check 2", + .insns = { + BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), + BPF_MOV32_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c new file mode 100644 index 000000000000..1f39d845c64f --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c @@ -0,0 +1,614 @@ +{ + "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + 
BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, bitwise AND, zero included", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "invalid indirect read from stack off -64+0 size 64", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, bitwise AND + JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP (signed), correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP, bounds + offset", + .insns = { + BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid stack type R1 off=-64 access_size=65", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP, no max check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + /* because max wasn't checked, signed min is negative */ + .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP, no min check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid indirect read from stack off -64+0 size 64", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: stack, JMP (signed), no min check", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 16), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R2 min value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: map, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + 
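+	/* Spilling the constant size and filling it back gives the
+	 * verifier an unknown scalar, which the JSGT/JSGE pair below
+	 * must bound before the probe_read call is allowed.
+	 */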
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: map, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=49", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: map adjusted, JMP, correct bounds", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: map adjusted, JMP, wrong max", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + 
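+	/* R1 = NULL with R2 = 0: a zero-sized NULL buffer is valid for
+	 * csum_diff, whose memory arguments are ARG_PTR_TO_MEM_OR_NULL.
+	 */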
BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=inv expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + 
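+	/* fixup_map_hash_8b: the test runner creates a hash map with
+	 * 8-byte values and patches its fd into the BPF_LD_MAP_FD at
+	 * the listed instruction index.
+	 */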
.fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 0 /* csum_diff of 64-byte packet */, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=inv expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=inv expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", + 
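+	/* Unlike csum_diff above, probe_read's buffer argument is not
+	 * OR_NULL: NULL is rejected outright, while a non-NULL pointer
+	 * with a possibly-zero size remains acceptable.
+	 */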
.insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: 8 bytes leak", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .errstr = "invalid indirect read from stack off -64+32 size 64", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to variable memory: 8 bytes no leak (init memory)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c new file mode 100644 index 000000000000..ae54587e9829 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/helper_packet_access.c @@ -0,0 +1,460 @@ +{ + "helper access to packet: test1, valid packet_ptr range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 5 }, + .result_unpriv = ACCEPT, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "helper 
access to packet: test2, unchecked packet_ptr", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "helper access to packet: test3, variable add", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), + BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 11 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "helper access to packet: test4, packet_ptr with bad range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 7 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "helper access to packet: test5, packet_ptr with too short range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 6 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "helper access to packet: test6, cls valid packet_ptr range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 5 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test7, cls unchecked packet_ptr", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, 
data)), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test8, cls variable add", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), + BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 11 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test9, cls packet_ptr with bad range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 7 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test10, cls packet_ptr with too short range", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 6 }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test11, cls unsuitable helper 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_4, 42), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "helper access to the packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test12, cls unsuitable helper 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + 
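+	/* skb_load_bytes is not marked pkt_access, so packet pointers
+	 * may not be passed as its buffer (such helpers could be left
+	 * with dangling packet pointers); the copy must target stack
+	 * or map memory instead.
+	 */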
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_4, 4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "helper access to the packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test13, cls helper ok", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test14, cls helper ok sub", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test15, cls helper fail sub", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test16, cls helper fail range 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test17, cls helper fail range 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, -9), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R2 min value is negative", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test18, cls helper fail range 3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, ~0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R2 min value is negative", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test19, cls helper range zero", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test20, pkt end as input", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R1 type=pkt_end expected=fp", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "helper access to packet: test21, wrong reg", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + 
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c new file mode 100644 index 000000000000..7572e403ddb9 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/helper_value_access.c @@ -0,0 +1,953 @@ +{ + "helper access to map: full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: out-of-bound range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=0 size=56", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: negative range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R2 min 
value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=0", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): out-of-bound range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=52", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): negative range (> adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R2 min value is negative", + .result = REJECT, 
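+	/* R2 is bpf_probe_read()'s size argument; loading it with the
+	 * constant -8 makes its minimum value negative, which the
+	 * verifier rejects before the helper is ever called.
+	 */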
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const imm): negative range (< adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R2 min value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): out-of-bound range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo) + 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = 
"invalid access to map value, value_size=48 off=4 size=52", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): negative range (> adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R2 min value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via const reg): negative range (< adjustment)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, -1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R2 min value is negative", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via variable): full range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via variable): partial range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via variable): empty range", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + 
BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R1 min value is outside of the array range", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via variable): no max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R1 unbounded memory access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to adjusted map (via variable): wrong max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, + sizeof(struct test_val) - + offsetof(struct test_val, foo) + 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "invalid access to map value, value_size=48 off=4 size=45", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using <, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using <, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = REJECT, + .errstr = "R1 unbounded memory access", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using <=, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, 
BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using <=, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = REJECT, + .errstr = "R1 unbounded memory access", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<, good access 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 
0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = REJECT, + .errstr = "R1 min value is negative", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<=, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<=, good access 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "helper access to map: bounds check using s<=, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .result = REJECT, + .errstr = "R1 min value is negative", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map lookup helper access to map", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 8 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map update helper access to map", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + 
BPF_EMIT_CALL(BPF_FUNC_map_update_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 10 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map update helper access to map: wrong size", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_update_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .fixup_map_hash_16b = { 10 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=8 off=0 size=16", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const imm)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 9 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const imm): out-of-bound 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 9 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=16 off=12 size=8", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const imm): out-of-bound 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 9 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=16 off=-4 size=8", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const reg)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 10 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const 
reg): out-of-bound 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 10 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=16 off=12 size=8", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via const reg): out-of-bound 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, -4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 10 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=16 off=-4 size=8", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via variable)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 11 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via variable): no max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 10 }, + .result = REJECT, + .errstr = "R2 unbounded memory access, make sure to bounds check any array access into a map", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "map helper access to adjusted map (via variable): wrong max check", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 3, 11 }, + .result = REJECT, + .errstr = 
"invalid access to map value, value_size=16 off=9 size=8", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/jump.c b/tools/testing/selftests/bpf/verifier/jump.c new file mode 100644 index 000000000000..8e6fcc8940f0 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/jump.c @@ -0,0 +1,180 @@ +{ + "jump test 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "jump test 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 14), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 11), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 5), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "jump test 3", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 19), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_JMP_IMM(BPF_JA, 0, 0, 15), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32), + BPF_JMP_IMM(BPF_JA, 0, 0, 11), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40), + BPF_JMP_IMM(BPF_JA, 0, 0, 7), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), + BPF_JMP_IMM(BPF_JA, 0, 0, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 24 }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = -ENOENT, +}, +{ + "jump test 4", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 
BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "jump test 5", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/junk_insn.c b/tools/testing/selftests/bpf/verifier/junk_insn.c new file mode 100644 index 000000000000..89d690f1992a --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/junk_insn.c @@ -0,0 +1,45 @@ +{ + "junk insn", + .insns = { + 
BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown opcode 00", + .result = REJECT, +}, +{ + "junk insn2", + .insns = { + BPF_RAW_INSN(1, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_LDX uses reserved fields", + .result = REJECT, +}, +{ + "junk insn3", + .insns = { + BPF_RAW_INSN(-1, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "unknown opcode ff", + .result = REJECT, +}, +{ + "junk insn4", + .insns = { + BPF_RAW_INSN(-1, -1, -1, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "unknown opcode ff", + .result = REJECT, +}, +{ + "junk insn5", + .insns = { + BPF_RAW_INSN(0x7f, -1, -1, -1, -1), + BPF_EXIT_INSN(), + }, + .errstr = "BPF_ALU uses reserved fields", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c new file mode 100644 index 000000000000..287b6fe629fa --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ld_abs.c @@ -0,0 +1,95 @@ +{ + "ld_abs: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, +}, +{ + "ld_abs: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, +}, +{ + "ld_abs: check calling conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, +}, +{ + "ld_abs: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, +}, +{ + "ld_abs: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, +}, +{ + "ld_abs: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 0), + BPF_LD_ABS(BPF_W, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "ld_abs: tests on r6 and skb data reload helper", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 42 /* ultimate return value */, +}, diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c new file mode 100644 index 000000000000..28b8c805a293 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c @@ -0,0 +1,141 @@ +{ + "test1 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, 
BPF_REG_1, 0, 1), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_LD_IMM insn", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, +{ + "test2 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid BPF_LD_IMM insn", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, +{ + "test3 ld_imm64", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test4 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test5 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test6 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "test7 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 1, +}, +{ + "test8 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "uses reserved fields", + .result = REJECT, +}, +{ + "test9 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 1, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test10 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test11 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, +{ + "test12 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "not pointing to valid bpf_map", + .result = REJECT, +}, +{ + "test13 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c new file mode 100644 index 000000000000..079734227538 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ld_ind.c @@ -0,0 +1,72 @@ +{ + "ld_ind: check calling conv, r1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R1 !read_ok", + .result = REJECT, 
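+	/* BPF_LD_IND (like BPF_LD_ABS) clobbers the caller-saved
+	 * registers R1-R5, so reading R1 after the load must fail
+	 * with !read_ok.
+	 */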
+}, +{ + "ld_ind: check calling conv, r2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, +}, +{ + "ld_ind: check calling conv, r3", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .errstr = "R3 !read_ok", + .result = REJECT, +}, +{ + "ld_ind: check calling conv, r4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_EXIT_INSN(), + }, + .errstr = "R4 !read_ok", + .result = REJECT, +}, +{ + "ld_ind: check calling conv, r5", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_5, 1), + BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .errstr = "R5 !read_ok", + .result = REJECT, +}, +{ + "ld_ind: check calling conv, r7", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 1, +}, diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c new file mode 100644 index 000000000000..d6eec17f2cd2 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/leak_ptr.c @@ -0,0 +1,67 @@ +{ + "leak pointer into ctx 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 2 }, + .errstr_unpriv = "R2 leaks addr into mem", + .result_unpriv = REJECT, + .result = REJECT, + .errstr = "BPF_XADD stores into R1 ctx is not allowed", +}, +{ + "leak pointer into ctx 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, + offsetof(struct __sk_buff, cb[0])), + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R10 leaks addr into mem", + .result_unpriv = REJECT, + .result = REJECT, + .errstr = "BPF_XADD stores into R1 ctx is not allowed", +}, +{ + "leak pointer into ctx 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, + offsetof(struct __sk_buff, cb[0])), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .errstr_unpriv = "R2 leaks addr into ctx", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "leak pointer into map val", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr_unpriv = "R6 leaks addr into mem", + .result_unpriv = REJECT, + .result = ACCEPT, +}, diff --git 
a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c new file mode 100644 index 000000000000..2cab6a3966bb --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/lwt.c @@ -0,0 +1,189 @@ +{ + "invalid direct packet write for LWT_IN", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "cannot write into packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, +{ + "invalid direct packet write for LWT_OUT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "cannot write into packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_OUT, +}, +{ + "direct packet write for LWT_XMIT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, +}, +{ + "direct packet read for LWT_IN", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, +{ + "direct packet read for LWT_OUT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_OUT, +}, +{ + "direct packet read for LWT_XMIT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, +}, +{ + "overlapping checks for direct packet access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, +}, +{ + "make headroom for LWT_XMIT", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_2, 34), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_skb_change_head), + /* split for s390 to succeed */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_2, 42), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_skb_change_head), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_LWT_XMIT, +}, +{ + "invalid access of tc_classid for LWT_IN", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", +}, +{ + "invalid access of tc_classid for LWT_OUT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", +}, +{ + "invalid access of tc_classid for LWT_XMIT", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", +}, +{ + "check skb->tc_classid half load not permitted for lwt prog", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, tc_classid) + 2), +#endif + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid bpf_context access", + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c new file mode 100644 index 000000000000..2798927ee9ff --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_in_map.c @@ -0,0 +1,62 @@ +{ + "map in map access", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .result = ACCEPT, +}, +{ + "invalid inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + 
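/* R1 was bumped 8 bytes past the inner map pointer above; map pointers must not be modified, so the second lookup below is rejected */ +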
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 pointer arithmetic on map_ptr prohibited", + .result = REJECT, +}, +{ + "forgot null checking on the inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 type=map_value_or_null expected=map_ptr", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c new file mode 100644 index 000000000000..bdd0e8d18333 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_ret_val.c @@ -0,0 +1,65 @@ +{ + "invalid map_fd for function call", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem), + BPF_EXIT_INSN(), + }, + .errstr = "fd 0 is not pointing to valid bpf_map", + .result = REJECT, +}, +{ + "don't check return value before access", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 invalid mem access 'map_value_or_null'", + .result = REJECT, +}, +{ + "access memory with incorrect alignment", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "misaligned value access", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, +{ + "sometimes access memory with incorrect alignment", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R0 invalid mem access", + .errstr_unpriv = "R0 leaks addr", + .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, +}, diff --git a/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c new file mode 100644 index 000000000000..471c1a5950d8 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/perf_event_sample_period.c @@ -0,0 +1,59 @@ +{ + "check bpf_perf_event_data->sample_period byte load permitted", + .insns = { + 
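/* clear R0, then load a single byte of sample_period; the byte offset depends on endianness, hence the #if below */ +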
BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 7), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "check bpf_perf_event_data->sample_period half load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 6), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "check bpf_perf_event_data->sample_period word load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), +#else + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period) + 4), +#endif + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "check bpf_perf_event_data->sample_period dword load permitted", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c new file mode 100644 index 000000000000..bbdba990fefb --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c @@ -0,0 +1,74 @@ +{ + "prevent map lookup in sockmap", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_sockmap = { 3 }, + .result = REJECT, + .errstr = "cannot pass map_type 15 into func bpf_map_lookup_elem", + .prog_type = BPF_PROG_TYPE_SOCK_OPS, +}, +{ + "prevent map lookup in sockhash", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_sockhash = { 3 }, + .result = REJECT, + .errstr = "cannot pass map_type 18 into func bpf_map_lookup_elem", + .prog_type = BPF_PROG_TYPE_SOCK_OPS, +}, +{ + "prevent map lookup in xskmap", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_map_xskmap = { 3 }, + .result = REJECT, + .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "prevent map lookup in stack trace", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + 
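/* map_type 7 is BPF_MAP_TYPE_STACK_TRACE, which does not support lookups from programs */ +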
.fixup_map_stacktrace = { 3 }, + .result = REJECT, + .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem", + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "prevent map lookup in prog array", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_EXIT_INSN(), + }, + .fixup_prog2 = { 3 }, + .result = REJECT, + .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem", +}, diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c new file mode 100644 index 000000000000..193d9e87d5a9 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/raw_stack.c @@ -0,0 +1,305 @@ +{ + "raw_stack: no skb_load_bytes", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + /* Call to skb_load_bytes() omitted. */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid read from stack off -8+0 size 8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, negative len", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R4 min value is negative", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, negative len 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, ~0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R4 min value is negative", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, zero len", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, no init", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, init", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + 
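/* the slot was pre-initialized with 0xcafe above, so the read after the call is valid whether or not the helper wrote it */ +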
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, spilled regs around bounds", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, spilled regs corruption", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 invalid mem access 'inv'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "raw_stack: skb_load_bytes, spilled regs corruption 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, + offsetof(struct __sk_buff, pkt_type)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R3 invalid mem access 'inv'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "raw_stack: skb_load_bytes, spilled regs + data", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct 
__sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, + offsetof(struct __sk_buff, priority)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-513 access_size=8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-1 access_size=8", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R4 min value is negative", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, invalid access 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack type R3 off=-512 access_size=0", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "raw_stack: skb_load_bytes, large access", + .insns = { + 
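/* a 512-byte read into the 512-byte buffer at fp-512 exactly fits the stack window, so it is accepted */ +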
BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_4, 512), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c new file mode 100644 index 000000000000..a9a8f620e71c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/runtime_jit.c @@ -0,0 +1,80 @@ +{ + "runtime/jit: tail_call within bounds, prog once", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 42, +}, +{ + "runtime/jit: tail_call within bounds, prog loop", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 41, +}, +{ + "runtime/jit: tail_call within bounds, no prog", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "runtime/jit: tail_call out of bounds", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 256), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 2, +}, +{ + "runtime/jit: pass negative index to tail_call", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, -1), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .result = ACCEPT, + .retval = 2, +}, +{ + "runtime/jit: pass > 32bit index to tail_call", + .insns = { + BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 2 }, + .result = ACCEPT, + .retval = 42, + /* Verifier rewrite for unpriv skips tail call here. 
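For unprivileged users the call therefore falls through and the program returns 2, while privileged execution reaches the tail-called program and returns 42. 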
*/ + .retval_unpriv = 2, +}, diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c new file mode 100644 index 000000000000..33e2ea916e2c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -0,0 +1,83 @@ +{ + "pointer/scalar confusion in state equality check (way 1)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_JMP_A(1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_JMP_A(0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .retval = POINTER_VALUE, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr as return value" +}, +{ + "pointer/scalar confusion in state equality check (way 2)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_JMP_A(1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = ACCEPT, + .retval = POINTER_VALUE, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr as return value" +}, +{ + "liveness pruning and write screening", + .insns = { + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* branch conditions teach us nothing about R2 */ + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, +{ + "varlen_map_value_access pruning", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), + BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .errstr = "R0 unbounded memory access", + .result_unpriv = REJECT, + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c new file mode 100644 index 000000000000..d58db72fdfe8 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -0,0 +1,75 @@ +{ + "check valid spill/fill", + .insns = { + /* spill R1(ctx) into stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + /* fill it back into R2 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), + /* should be able to access R0 = *(R2 + 8) */ + /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + 
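/* returning the filled ctx pointer exposes a kernel address, so unpriv is rejected below while priv sees POINTER_VALUE */ +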
.errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + .retval = POINTER_VALUE, +}, +{ + "check valid spill/fill, skb mark", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = ACCEPT, +}, +{ + "check corrupted spill/fill", + .insns = { + /* spill R1(ctx) into stack */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + /* mess up with R1 pointer on stack */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), + /* fill back into R0 is fine for priv. + * R0 now becomes SCALAR_VALUE. + */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + /* Load from R0 should fail. */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .errstr = "R0 invalid mem access 'inv", + .result = REJECT, +}, +{ + "check corrupted spill/fill, LSB", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = POINTER_VALUE, +}, +{ + "check corrupted spill/fill, MSB", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = POINTER_VALUE, +}, diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c new file mode 100644 index 000000000000..7276620ef242 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c @@ -0,0 +1,317 @@ +{ + "PTR_TO_STACK store/load", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0xfaceb00c, +}, +{ + "PTR_TO_STACK store/load - bad alignment on off", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", +}, +{ + "PTR_TO_STACK store/load - bad alignment on reg", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", +}, +{ + "PTR_TO_STACK store/load - out of bounds low", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off=-79992 size=8", + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", +}, +{ + "PTR_TO_STACK store/load - out of bounds high", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_1, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off=0 size=8", +}, +{ + "PTR_TO_STACK check high 1", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK check high 2", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK check high 3", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), + BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK check high 4", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .errstr = "invalid stack off=0 size=1", + .result = REJECT, +}, +{ + "PTR_TO_STACK check high 5", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off", +}, +{ + "PTR_TO_STACK check high 6", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), + BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off", +}, +{ + "PTR_TO_STACK check high 7", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), + BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .errstr = "fp pointer offset", +}, +{ + "PTR_TO_STACK check low 1", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK check low 2", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), + BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1), + BPF_EXIT_INSN(), + }, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK check low 3", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .errstr = "invalid stack off=-513 size=1", + .result = REJECT, 
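+ /* fp-513 is one byte below the 512-byte stack window, so even privileged programs are rejected */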
+}, +{ + "PTR_TO_STACK check low 4", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "math between fp pointer", +}, +{ + "PTR_TO_STACK check low 5", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off", +}, +{ + "PTR_TO_STACK check low 6", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), + BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid stack off", +}, +{ + "PTR_TO_STACK check low 7", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), + BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .errstr = "fp pointer offset", +}, +{ + "PTR_TO_STACK mixed reg/k, 1", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), + BPF_MOV64_IMM(BPF_REG_2, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK mixed reg/k, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), + BPF_MOV64_IMM(BPF_REG_2, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 42, +}, +{ + "PTR_TO_STACK mixed reg/k, 3", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), + BPF_MOV64_IMM(BPF_REG_2, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = -3, +}, +{ + "PTR_TO_STACK reg", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_MOV64_IMM(BPF_REG_2, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .result_unpriv = REJECT, + .errstr_unpriv = "invalid stack off=0 size=1", + .result = ACCEPT, + .retval = 42, +}, +{ + "stack pointer arithmetic", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, diff --git 
a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c new file mode 100644 index 000000000000..987a5871ff1d --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/uninit.c @@ -0,0 +1,39 @@ +{ + "read uninitialized register", + .insns = { + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R2 !read_ok", + .result = REJECT, +}, +{ + "read invalid register", + .insns = { + BPF_MOV64_REG(BPF_REG_0, -1), + BPF_EXIT_INSN(), + }, + .errstr = "R15 is invalid", + .result = REJECT, +}, +{ + "program doesn't init R0 before exit", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, +}, +{ + "program doesn't init R0 before exit in all branches", + .insns = { + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .errstr_unpriv = "R1 pointer comparison", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c new file mode 100644 index 000000000000..dca58cf1a4ab --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/unpriv.c @@ -0,0 +1,521 @@ +{ + "unpriv: return pointer", + .insns = { + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr", + .retval = POINTER_VALUE, +}, +{ + "unpriv: add const to pointer", + .insns = { + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "unpriv: add pointer to pointer", + .insns = { + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R1 pointer += pointer", +}, +{ + "unpriv: neg pointer", + .insns = { + BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 pointer arithmetic", +}, +{ + "unpriv: cmp pointer with const", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 pointer comparison", +}, +{ + "unpriv: cmp pointer with pointer", + .insns = { + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R10 pointer comparison", +}, +{ + "unpriv: check that printk is disallowed", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_2, 8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "unknown func bpf_trace_printk#6", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: pass pointer to helper function", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + 
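/* R3 and R4 both carry the stack pointer into map_update_elem; for unpriv this counts as leaking an address (R4 is flagged below) */ +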
.errstr_unpriv = "R4 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: indirectly pass pointer on stack to helper function", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "invalid indirect read from stack off -8+0 size 8", + .result = REJECT, +}, +{ + "unpriv: mangle pointer on stack 1", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), + BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: mangle pointer on stack 2", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), + BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "attempt to corrupt spilled", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: read pointer from stack in small chunks", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid size", + .result = REJECT, +}, +{ + "unpriv: write pointer into ctx", + .insns = { + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 leaks addr", + .result_unpriv = REJECT, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "unpriv: spill/fill of ctx", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "unpriv: spill/fill of ctx 2", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of ctx 3", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R1 type=fp expected=ctx", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of ctx 4", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10, BPF_REG_0, -8, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R1 type=inv expected=ctx", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers stx", + 
.insns = { + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct __sk_buff, mark)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "same insn cannot be used with different pointers", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers stx - ctx and sock", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb == NULL) *target = sock; */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* else *target = skb; */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* struct __sk_buff *skb = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* skb->mark = 42; */ + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct __sk_buff, mark)), + /* if (sk) bpf_sk_release(sk) */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "type=ctx expected=sock", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers stx - leak sock", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb == NULL) *target = sock; */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* else *target = skb; */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* struct __sk_buff *skb = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* skb->mark = 42; */ + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + //.errstr = "same insn cannot be used with different pointers", + .errstr = "Unreleased reference", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers stx - sock and ctx (read)", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb) *target = skb */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* else *target = sock */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, 
BPF_REG_2, 0), + /* struct bpf_sock *sk = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct bpf_sock, mark)), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "same insn cannot be used with different pointers", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers stx - sock and ctx (write)", + .insns = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + /* struct bpf_sock *sock = bpf_sock_lookup(...); */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + /* u64 foo; */ + /* void *target = &foo; */ + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + /* if (skb) *target = skb */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + /* else *target = sock */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + /* struct bpf_sock *sk = *target; */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + /* if (sk) sk->mark = 42; bpf_sk_release(sk); */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_IMM(BPF_REG_3, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, + offsetof(struct bpf_sock, mark)), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + //.errstr = "same insn cannot be used with different pointers", + .errstr = "cannot write into socket", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "unpriv: spill/fill of different pointers ldx", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, + -(__s32)offsetof(struct bpf_perf_event_data, + sample_period) - 8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, + offsetof(struct bpf_perf_event_data, sample_period)), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "same insn cannot be used with different pointers", + .prog_type = BPF_PROG_TYPE_PERF_EVENT, +}, +{ + "unpriv: write pointer into map elem value", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "alu32: mov u32 const", + .insns = { + BPF_MOV32_IMM(BPF_REG_7, 0), + BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1), + BPF_MOV32_REG(BPF_REG_0, BPF_REG_7), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "unpriv: partial copy of pointer", + .insns = { + BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R10 partial copy", + .result_unpriv = REJECT, + 
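/* BPF_MOV32_REG copies only the low 32 bits of the frame pointer; unpriv may not truncate pointers this way */ +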
.result = ACCEPT, +}, +{ + "unpriv: pass pointer to tail_call", + .insns = { + BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 1 }, + .errstr_unpriv = "R3 leaks addr into helper", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: cmp map pointer with zero", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 1 }, + .errstr_unpriv = "R1 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: write into frame pointer", + .insns = { + BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "frame pointer is read only", + .result = REJECT, +}, +{ + "unpriv: spill/fill frame pointer", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "frame pointer is read only", + .result = REJECT, +}, +{ + "unpriv: cmp of frame pointer", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R10 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: adding of fp", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, +}, +{ + "unpriv: cmp of stack pointer", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R2 pointer comparison", + .result_unpriv = REJECT, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c new file mode 100644 index 000000000000..0e42592b1218 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/value.c @@ -0,0 +1,104 @@ +{ + "map element value store of cleared call register", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R1 !read_ok", + .errstr = "R1 !read_ok", + .result = REJECT, + .result_unpriv = REJECT, +}, +{ + "map element value with unaligned store", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), + BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), 
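+ /* further doubleword stores at odd offsets around R8; legal only on arches with efficient unaligned access */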
+ BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), + BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), + BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "map element value with unaligned load", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "map element value is preserved across register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c b/tools/testing/selftests/bpf/verifier/value_adj_spill.c new file mode 100644 index 000000000000..7135e8021b81 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/value_adj_spill.c @@ -0,0 +1,43 @@ +{ + "map element value is preserved across register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, +}, +{ + "map element value or null is marked on register spilling", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + 
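/* zero the lookup key at fp-8; the lookup result gets spilled below before the NULL check */ +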
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .result = ACCEPT, + .result_unpriv = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c new file mode 100644 index 000000000000..7f6c232cd842 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c @@ -0,0 +1,94 @@ +{ + "map element value illegal alu op, 1", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 bitwise operator &= on pointer", + .result = REJECT, +}, +{ + "map element value illegal alu op, 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 32-bit pointer arithmetic prohibited", + .result = REJECT, +}, +{ + "map element value illegal alu op, 3", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 pointer arithmetic with /= operator", + .result = REJECT, +}, +{ + "map element value illegal alu op, 4", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "invalid mem access 'inv'", + .result = REJECT, + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "map element value illegal alu op, 5", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_IMM(BPF_REG_3, 4096), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), + BPF_EXIT_INSN(), + }, + 
.fixup_map_hash_48b = { 3 }, + .errstr = "R0 invalid mem access 'inv'", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c new file mode 100644 index 000000000000..860d4a71cd83 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/value_or_null.c @@ -0,0 +1,152 @@ +{ + "multiple registers share map_lookup_elem result", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "alu ops on ptr_to_map_value_or_null, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "R4 pointer arithmetic on map_value_or_null", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "alu ops on ptr_to_map_value_or_null, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "R4 pointer arithmetic on map_value_or_null", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "alu ops on ptr_to_map_value_or_null, 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .errstr = "R4 pointer arithmetic on map_value_or_null", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "invalid memory access with multiple map_lookup_elem calls", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .result = REJECT, + .errstr = "R4 !read_ok", + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "valid indirect map_lookup_elem access with 2nd lookup in branch", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_2, 10), + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 4 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS +}, +{ + "invalid map access from else condition", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 3 }, + .errstr = "R0 unbounded memory access", + .result = REJECT, + .errstr_unpriv = "R0 leaks addr", + .result_unpriv = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c new file mode 100644 index 000000000000..9ab5ace83e02 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -0,0 +1,792 @@ +{ + "map access: known scalar += value_ptr from different maps", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R1 tried to add from different maps", + .retval = 1, +}, +{ + "map access: value_ptr -= known scalar from different maps", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 
0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 min value is outside of the array range", + .retval = 1, +}, +{ + "map access: known scalar += value_ptr from different maps, but same value properties", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 5 }, + .fixup_map_array_48b = { 8 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: mixing value pointer and scalar, 1", + .insns = { + // load map value pointer into r0 and r2 + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + // load some number from the map into r1 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + // depending on r1, branch: + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), + // branch A + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_JMP_A(2), + // branch B + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0x100000), + // common instruction + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + // depending on r1, branch: + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + // branch A + BPF_JMP_A(4), + // branch B + BPF_MOV64_IMM(BPF_REG_0, 0x13371337), + // verifier follows fall-through + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + // fake-dead code; targeted from branch A to + // prevent dead code sanitization + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R2 tried to add from different pointers or scalars", + .retval = 0, +}, +{ + "map access: mixing value pointer and scalar, 2", + .insns = { + // load map value pointer into r0 and r2 + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + // load some number from the map into r1 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + // depending on r1, branch: + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + // branch A + BPF_MOV64_IMM(BPF_REG_2, 0), + 
BPF_MOV64_IMM(BPF_REG_3, 0x100000), + BPF_JMP_A(2), + // branch B + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 0), + // common instruction + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + // depending on r1, branch: + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + // branch A + BPF_JMP_A(4), + // branch B + BPF_MOV64_IMM(BPF_REG_0, 0x13371337), + // verifier follows fall-through + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + // fake-dead code; targeted from branch A to + // prevent dead code sanitization + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R2 tried to add from different maps or paths", + .retval = 0, +}, +{ + "sanitation: alu with different scalars", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_MAP_FD(BPF_REG_ARG1, 0), + BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0x100000), + BPF_JMP_A(2), + BPF_MOV64_IMM(BPF_REG_2, 42), + BPF_MOV64_IMM(BPF_REG_3, 0x100001), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result = ACCEPT, + .retval = 0x100000, +}, +{ + "map access: value_ptr += known scalar, upper oob arith, test 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 48), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr += known scalar, upper oob arith, test 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 49), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr += known scalar, upper oob arith, test 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_IMM(BPF_REG_1, 47), + BPF_ALU64_REG(BPF_ADD, 
BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr -= known scalar, lower oob arith, test 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_1, 47), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 48), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 min value is outside of the array range", + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", +}, +{ + "map access: value_ptr -= known scalar, lower oob arith, test 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_IMM(BPF_REG_1, 47), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 48), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr -= known scalar, lower oob arith, test 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_1, 47), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 47), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: known scalar += value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: value_ptr += known scalar, 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: value_ptr += known scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 49), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "invalid access to map value", +}, +{ + "map access: value_ptr += known scalar, 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "invalid access to map value", +}, +{ + "map access: value_ptr += known scalar, 4", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, -2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr += known scalar, 5", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 0xabcdef12, +}, +{ + "map access: value_ptr += known scalar, 6", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)), + 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 0xabcdef12, +}, +{ + "map access: unknown scalar += value_ptr, 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: unknown scalar += value_ptr, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 0xabcdef12, +}, +{ + "map access: unknown scalar += value_ptr, 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 0xabcdef12, +}, +{ + "map access: unknown scalar += value_ptr, 4", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_MOV64_IMM(BPF_REG_1, 19), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R1 max value is outside of the array range", + .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", +}, +{ + "map access: value_ptr += unknown scalar, 1", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: value_ptr += unknown scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 0xabcdef12, +}, +{ + "map access: value_ptr += unknown scalar, 3", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1), + BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "map access: value_ptr += value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 pointer += pointer prohibited", +}, +{ + "map access: known scalar -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R1 tried to subtract pointer from scalar", +}, +{ + "map access: value_ptr -= known scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 min value is outside of 
the array range", +}, +{ + "map access: value_ptr -= known scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: unknown scalar -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R1 tried to subtract pointer from scalar", +}, +{ + "map access: value_ptr -= unknown scalar", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = REJECT, + .errstr = "R0 min value is negative", +}, +{ + "map access: value_ptr -= unknown scalar, 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), + BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .retval = 1, +}, +{ + "map access: value_ptr -= value_ptr", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result = 
REJECT, + .errstr = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 pointer -= pointer prohibited", +}, diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c new file mode 100644 index 000000000000..1e536ff121a5 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/var_off.c @@ -0,0 +1,66 @@ +{ + "variable-offset ctx access", + .insns = { + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + /* add it to skb. We now have either &skb->len or + * &skb->pkt_type, but we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + /* dereference it */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "variable ctx access var_off=(0x0; 0x4)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, +{ + "variable-offset stack access", + .insns = { + /* Fill the top 8 bytes of the stack */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), + /* add it to fp. We now have either fp-4 or fp-8, but + * we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* dereference it */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_EXIT_INSN(), + }, + .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, +{ + "indirect variable-offset stack access", + .insns = { + /* Fill the top 8 bytes of the stack */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), + /* add it to fp. We now have either fp-4 or fp-8, but + * we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* dereference it indirectly */ + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 5 }, + .errstr = "variable stack read R2", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, +}, From 48729226238d4d56bdb8de9d54b911f179dd54b7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 25 Jan 2019 15:24:44 -0800 Subject: [PATCH 63/68] selftests: bpf: break up the rest of test_verifier Break up the rest of test_verifier tests into separate files. 
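For context, the split relies on test_verifier.c pulling the per-topic files under tools/testing/selftests/bpf/verifier/ back in through a generated header. A minimal sketch of that mechanism, assuming the generated verifier/tests.h simply emits one #include per test file (the exact generated contents come from the selftests Makefile and may differ):

    /* test_verifier.c: tests[] is filled from the split files */
    static struct bpf_test tests[] = {
    #define FILL_ARRAY
    #include <verifier/tests.h>
    #undef FILL_ARRAY
    };

    /* verifier/tests.h (generated -- illustrative sketch only) */
    #ifdef FILL_ARRAY
    #include "basic_instr.c"
    #include "calls.c"
    /* ... one #include per verifier/*.c test file ... */
    #endif

Each verifier/*.c fragment is just a comma-terminated sequence of struct bpf_test initializers, so including the fragments inside the array body compiles to the same tests[] as before the split.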
Signed-off-by: Jakub Kicinski Acked-by: Jiong Wang Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_verifier.c | 6149 ----------------- .../selftests/bpf/verifier/basic_instr.c | 42 + .../selftests/bpf/verifier/bounds_deduction.c | 124 + .../selftests/bpf/verifier/bpf_get_stack.c | 44 + tools/testing/selftests/bpf/verifier/calls.c | 1942 ++++++ .../bpf/verifier/cgroup_inv_retcode.c | 72 + tools/testing/selftests/bpf/verifier/ctx.c | 70 + .../testing/selftests/bpf/verifier/ctx_skb.c | 32 + .../selftests/bpf/verifier/dead_code.c | 159 + .../bpf/verifier/direct_packet_access.c | 14 + tools/testing/selftests/bpf/verifier/jit.c | 88 + tools/testing/selftests/bpf/verifier/jmp32.c | 724 ++ tools/testing/selftests/bpf/verifier/jset.c | 165 + tools/testing/selftests/bpf/verifier/ld_abs.c | 191 + tools/testing/selftests/bpf/verifier/ld_dw.c | 36 + .../selftests/bpf/verifier/map_ptr_mixing.c | 100 + .../testing/selftests/bpf/verifier/masking.c | 322 + .../selftests/bpf/verifier/meta_access.c | 235 + .../selftests/bpf/verifier/ref_tracking.c | 607 ++ .../selftests/bpf/verifier/search_pruning.c | 73 + tools/testing/selftests/bpf/verifier/xadd.c | 97 + tools/testing/selftests/bpf/verifier/xdp.c | 14 + .../bpf/verifier/xdp_direct_packet_access.c | 900 +++ 23 files changed, 6051 insertions(+), 6149 deletions(-) create mode 100644 tools/testing/selftests/bpf/verifier/bounds_deduction.c create mode 100644 tools/testing/selftests/bpf/verifier/bpf_get_stack.c create mode 100644 tools/testing/selftests/bpf/verifier/calls.c create mode 100644 tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c create mode 100644 tools/testing/selftests/bpf/verifier/dead_code.c create mode 100644 tools/testing/selftests/bpf/verifier/jit.c create mode 100644 tools/testing/selftests/bpf/verifier/jmp32.c create mode 100644 tools/testing/selftests/bpf/verifier/jset.c create mode 100644 tools/testing/selftests/bpf/verifier/ld_dw.c create mode 100644 tools/testing/selftests/bpf/verifier/map_ptr_mixing.c create mode 100644 tools/testing/selftests/bpf/verifier/masking.c create mode 100644 tools/testing/selftests/bpf/verifier/meta_access.c create mode 100644 tools/testing/selftests/bpf/verifier/ref_tracking.c create mode 100644 tools/testing/selftests/bpf/verifier/xadd.c create mode 100644 tools/testing/selftests/bpf/verifier/xdp.c create mode 100644 tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index fcdb7e2d2ec0..c5e22422a852 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -251,6155 +251,6 @@ static struct bpf_test tests[] = { #define FILL_ARRAY #include <verifier/tests.h> #undef FILL_ARRAY - { - "invalid 64-bit BPF_END", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 0), - { - .code = BPF_ALU64 | BPF_END | BPF_TO_LE, - .dst_reg = BPF_REG_0, - .src_reg = 0, - .off = 0, - .imm = 32, - }, - BPF_EXIT_INSN(), - }, - .errstr = "unknown opcode d7", - .result = REJECT, - }, - { - "XDP, using ifindex from netdev", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, ingress_ifindex)), - BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 1, - }, - { - "meta access, test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3,
BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet, off=-8", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test5", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_xdp_adjust_meta), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R3 !read_ok", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test7", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, 
BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test8", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test9", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test10", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_IMM(BPF_REG_5, 42), - BPF_MOV64_IMM(BPF_REG_6, 24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), - BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test11", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_IMM(BPF_REG_5, 42), - BPF_MOV64_IMM(BPF_REG_6, 24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), - BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "meta access, test12", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_5, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "arithmetic ops make PTR_TO_CTX unusable", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct __sk_buff, data) - - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "dereference of modified ctx ptr", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "pkt_end - pkt_start is allowed", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = TEST_DATA_LEN, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "XDP pkt read, pkt_end mangling, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "XDP pkt read, pkt_end mangling, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "XDP pkt read, pkt_data' > pkt_end, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' > pkt_end, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 
- }, - { - "XDP pkt read, pkt_data' > pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end > pkt_data', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end > pkt_data', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end > pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' < pkt_end, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' < pkt_end, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - 
BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' < pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end < pkt_data', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end < pkt_data', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_end < pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' >= pkt_end, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, 
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end >= pkt_data', good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end >= pkt_data', bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end >= pkt_data', bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data' <= pkt_end, good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data' <= pkt_end, bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end <= pkt_data', good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end <= pkt_data', bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_end <= pkt_data', bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
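(The "XDP pkt read" permutations above all encode the same canonical bounds check: copy pkt_data, advance it by 8, compare it against pkt_end in every direction and operand order, and only accept the load on the branch where the verifier can prove the access stays inside the packet. For reference, the accepted shape looks roughly like this in restricted C; a hypothetical sketch, not part of the patch, assuming clang's BPF target and libbpf's bpf_helpers.h for SEC():

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_bounds(struct xdp_md *ctx)
	{
		void *data = (void *)(long)ctx->data;
		void *data_end = (void *)(long)ctx->data_end;

		/* pkt_data' = data + 8; the 8-byte load below is provably
		 * in bounds only on the branch where pkt_data' <= pkt_end.
		 */
		if (data + 8 > data_end)
			return XDP_DROP;
		return *(__u64 *)data ? XDP_PASS : XDP_DROP;
	}

The "bad access" variants either load past the verified window or load on the wrong branch, which is what produces "R1 offset is outside of the packet".)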
-	{
-		"XDP pkt read, pkt_meta' > pkt_data, good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' > pkt_data, bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data > pkt_meta', good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data > pkt_meta', bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data > pkt_meta', bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' < pkt_data, good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
"XDP pkt read, pkt_meta' < pkt_data, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_meta' < pkt_data, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data < pkt_meta', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data < pkt_meta', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_data < pkt_meta', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "XDP pkt read, pkt_meta' >= pkt_data, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - 
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' >= pkt_data, bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data >= pkt_meta', good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data >= pkt_meta', bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' <= pkt_data, good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_meta' <= pkt_data, bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data <= pkt_meta', good access",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.result = ACCEPT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data <= pkt_meta', bad access 1",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-			    offsetof(struct xdp_md, data_meta)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-		BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.errstr = "R1 offset is outside of the packet",
-		.result = REJECT,
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
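(The pkt_meta variants above repeat the same matrix one window earlier: xdp_md->data_meta points at the metadata area, which ends where the packet proper begins, so the valid range is [data_meta, data). A hedged restricted-C equivalent — hypothetical, and in practice the program would first make room with bpf_xdp_adjust_meta():

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_meta_bounds(struct xdp_md *ctx)
	{
		void *data = (void *)(long)ctx->data;
		void *meta = (void *)(long)ctx->data_meta;

		/* metadata lives in front of the packet: meta + 8 must not
		 * cross into data before the 8-byte load is allowed */
		if (meta + 8 > data)
			return XDP_PASS;
		return *(__u64 *)meta ? XDP_PASS : XDP_DROP;
	}
)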
tried to subtract pointer from scalar", - }, - { - "check deducing bounds from const, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 1, - }, - { - "check deducing bounds from const, 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 tried to subtract pointer from scalar", - }, - { - "check deducing bounds from const, 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "check deducing bounds from const, 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 tried to subtract pointer from scalar", - }, - { - "check deducing bounds from const, 6", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 tried to subtract pointer from scalar", - }, - { - "check deducing bounds from const, 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, ~0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "dereference of modified ctx ptr", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "check deducing bounds from const, 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, ~0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "dereference of modified ctx ptr", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "check deducing bounds from const, 9", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 tried to subtract pointer from scalar", - }, - { - "check deducing bounds from const, 10", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), - /* Marks reg as unknown. */ - BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "math between ctx pointer and register with unbounded min value is not allowed", - }, - { - "bpf_exit with invalid return code. test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x0; 0xffffffff)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. 
test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x0; 0x3)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. test4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. test5", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x2; 0x0)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. test6", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R0 is not a known value (ctx)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "bpf_exit with invalid return code. test7", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4), - BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has unknown scalar value", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - }, - { - "calls: basic sanity", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .result = ACCEPT, - }, - { - "calls: not on unpriviledged", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 1, - }, - { - "calls: div by 0 in subprog", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(BPF_REG_2, 0), - BPF_MOV32_IMM(BPF_REG_3, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "calls: multiple ret types in subprog 1", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .prog_type = 
-	{
-		"calls: basic sanity",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.result = ACCEPT,
-	},
-	{
-		"calls: not on unpriviledged",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		},
-		.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
-		.result_unpriv = REJECT,
-		.result = ACCEPT,
-		.retval = 1,
-	},
-	{
-		"calls: div by 0 in subprog",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
-			    offsetof(struct __sk_buff, data_end)),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV32_IMM(BPF_REG_2, 0),
-		BPF_MOV32_IMM(BPF_REG_3, 1),
-		BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, data)),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.result = ACCEPT,
-		.retval = 1,
-	},
-	{
-		"calls: multiple ret types in subprog 1",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
-			    offsetof(struct __sk_buff, data_end)),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, data)),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
-		BPF_MOV32_IMM(BPF_REG_0, 42),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.result = REJECT,
-		.errstr = "R0 invalid mem access 'inv'",
-	},
-	{
-		"calls: multiple ret types in subprog 2",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
-			    offsetof(struct __sk_buff, data_end)),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
-		BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, data)),
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9),
-		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-		BPF_LD_MAP_FD(BPF_REG_1, 0),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			     BPF_FUNC_map_lookup_elem),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
-			    offsetof(struct __sk_buff, data)),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.fixup_map_hash_8b = { 16 },
-		.result = REJECT,
-		.errstr = "R0 min value is outside of the array range",
-	},
-	{
-		"calls: overlapping caller/callee",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "last insn is not an exit or jmp",
-		.result = REJECT,
-	},
-	{
-		"calls: wrong recursive calls",
-		.insns = {
-		BPF_JMP_IMM(BPF_JA, 0, 0, 4),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 4),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "jump out of range",
-		.result = REJECT,
-	},
-	{
-		"calls: wrong src reg",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "BPF_CALL uses reserved fields",
-		.result = REJECT,
-	},
-	{
-		"calls: wrong off value",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "BPF_CALL uses reserved fields",
-		.result = REJECT,
-	},
-	{
-		"calls: jump back loop",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn 0 to 0",
-		.result = REJECT,
-	},
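(Throughout the "calls:" group, BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, N) hand-assembles a BPF-to-BPF call: src_reg == 1 (BPF_PSEUDO_CALL) marks the call as targeting another function in the same program, and the immediate N is the callee's offset relative to the next instruction. The tests here and below mutate exactly those fields (src_reg, off, imm) to hit the "reserved fields", out-of-range and back-edge checks. Compilers emit the same encoding for any non-inlined function; a hypothetical sketch:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	static __attribute__((noinline)) int subprog(int x)
	{
		return x + 1;		/* compiled as a separate BPF function */
	}

	SEC("classifier")
	int entry(struct __sk_buff *skb)
	{
		/* emitted as BPF_CALL with src_reg = BPF_PSEUDO_CALL */
		return subprog(skb->len);
	}
)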
-	{
-		"calls: conditional call",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "jump out of range",
-		.result = REJECT,
-	},
-	{
-		"calls: conditional call 2",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.result = ACCEPT,
-	},
-	{
-		"calls: conditional call 3",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 4),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, -6),
-		BPF_MOV64_IMM(BPF_REG_0, 3),
-		BPF_JMP_IMM(BPF_JA, 0, 0, -6),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn",
-		.result = REJECT,
-	},
-	{
-		"calls: conditional call 4",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, -5),
-		BPF_MOV64_IMM(BPF_REG_0, 3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.result = ACCEPT,
-	},
-	{
-		"calls: conditional call 5",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, -6),
-		BPF_MOV64_IMM(BPF_REG_0, 3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn",
-		.result = REJECT,
-	},
-	{
-		"calls: conditional call 6",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, mark)),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge from insn",
-		.result = REJECT,
-	},
-	{
-		"calls: using r0 returned by callee",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 2),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.result = ACCEPT,
-	},
-	{
-		"calls: using uninit r0 from callee",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "!read_ok",
-		.result = REJECT,
-	},
-	{
-		"calls: callee is using r1",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, len)),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_ACT,
-		.result = ACCEPT,
-		.retval = TEST_DATA_LEN,
-	},
-	{
-		"calls: callee using args1",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-		BPF_EXIT_INSN(),
-		},
-		.errstr_unpriv = "allowed for root only",
-		.result_unpriv = REJECT,
-		.result = ACCEPT,
-		.retval = POINTER_VALUE,
-	},
-	{
-		"calls: callee using wrong args2",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "R2 !read_ok",
-		.result = REJECT,
-	},
-	{
-		"calls: callee using two args",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
-			    offsetof(struct __sk_buff, len)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6,
-			    offsetof(struct __sk_buff, len)),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
-		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
-		BPF_EXIT_INSN(),
-		},
-		.errstr_unpriv = "allowed for root only",
-		.result_unpriv = REJECT,
-		.result = ACCEPT,
-		.retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN,
-	},
-	{
-		"calls: callee changing pkt pointers",
-		.insns = {
-		BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
-			    offsetof(struct xdp_md, data)),
-		BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
-			    offsetof(struct xdp_md, data_end)),
-		BPF_MOV64_REG(BPF_REG_8, BPF_REG_6),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8),
-		BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
-		/* clear_all_pkt_pointers() has to walk all frames
-		 * to make sure that pkt pointers in the caller
-		 * are cleared when callee is calling a helper that
-		 * adjusts packet size
-		 */
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
-		BPF_MOV32_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_2, 0),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			     BPF_FUNC_xdp_adjust_head),
-		BPF_EXIT_INSN(),
-		},
-		.result = REJECT,
-		.errstr = "R6 invalid mem access 'inv'",
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
-	},
-	{
-		"calls: two calls with args",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
-		BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
-		BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, len)),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.result = ACCEPT,
-		.retval = TEST_DATA_LEN + TEST_DATA_LEN,
-	},
-	{
-		"calls: calls with stack arith",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64),
-		BPF_MOV64_IMM(BPF_REG_0, 42),
-		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.result = ACCEPT,
-		.retval = 42,
-	},
-	{
-		"calls: calls with misaligned stack access",
-		.insns = {
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63),
-		BPF_MOV64_IMM(BPF_REG_0, 42),
-		BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
-		.errstr = "misaligned stack access",
-		.result = REJECT,
-	},
-	{
-		"calls: calls control flow, jump test",
-		.insns = {
-		BPF_MOV64_IMM(BPF_REG_0, 42),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 43),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_JMP_IMM(BPF_JA, 0, 0, -3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.result = ACCEPT,
-		.retval = 43,
-	},
-	{
-		"calls: calls control flow, jump test 2",
-		.insns = {
-		BPF_MOV64_IMM(BPF_REG_0, 42),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 43),
-		BPF_JMP_IMM(BPF_JA, 0, 0, 1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-		.errstr = "jump out of range from insn 1 to 4",
-		.result = REJECT,
-	},
-	{
-		"calls: two calls with bad jump",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
-		BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
-		BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
-		BPF_EXIT_INSN(),
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-			    offsetof(struct __sk_buff, len)),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "jump out of range from insn 11 to 9",
-		.result = REJECT,
-	},
-	{
-		"calls: recursive call. test1",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge",
-		.result = REJECT,
-	},
-	{
-		"calls: recursive call. test2",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "back-edge",
-		.result = REJECT,
-	},
-	{
-		"calls: unreachable code",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "unreachable insn 6",
-		.result = REJECT,
-	},
-	{
-		"calls: invalid call",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "invalid destination",
-		.result = REJECT,
-	},
-	{
-		"calls: invalid call 2",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "invalid destination",
-		.result = REJECT,
-	},
-	{
-		"calls: jumping across function bodies. test1",
-		.insns = {
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
-		.errstr = "jump out of range",
-		.result = REJECT,
-	},
test2", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "jump out of range", - .result = REJECT, - }, - { - "calls: call without exit", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "not an exit", - .result = REJECT, - }, - { - "calls: call into middle of ld_imm64", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "last insn", - .result = REJECT, - }, - { - "calls: call into middle of other call", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "last insn", - .result = REJECT, - }, - { - "calls: ld_abs with changing ctx data in callee", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_vlan_push), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed", - .result = REJECT, - }, - { - "calls: two calls with bad fallthrough", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .errstr = "not an exit", - .result = REJECT, - }, - { - "calls: two calls with stack read", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, - }, - { - "calls: two calls with stack write", - .insns = { - /* main prog */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, 
-	{
-		"calls: two calls with stack write",
-		.insns = {
-		/* main prog */
-		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
-		BPF_EXIT_INSN(),
-
-		/* subprog 1 */
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7),
-		BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
-		BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0),
-		BPF_MOV64_REG(BPF_REG_0, BPF_REG_8),
-		/* write into stack frame of main prog */
-		BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-
-		/* subprog 2 */
-		/* read from stack frame of main prog */
-		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.result = ACCEPT,
-	},
-	{
-		"calls: stack overflow using two frames (pre-call access)",
-		.insns = {
-		/* prog 1 */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-
-		/* prog 2 */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.errstr = "combined stack size",
-		.result = REJECT,
-	},
-	{
-		"calls: stack overflow using two frames (post-call access)",
-		.insns = {
-		/* prog 1 */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
-		BPF_EXIT_INSN(),
-
-		/* prog 2 */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.errstr = "combined stack size",
-		.result = REJECT,
-	},
-	{
-		"calls: stack depth check using three frames. test1",
-		.insns = {
-		/* main */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		/* A */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
-		BPF_EXIT_INSN(),
-		/* B */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		/* stack_main=32, stack_A=256, stack_B=64
-		 * and max(main+A, main+A+B) < 512
-		 */
-		.result = ACCEPT,
-	},
-	{
-		"calls: stack depth check using three frames. test2",
-		.insns = {
-		/* main */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-		/* A */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
-		BPF_EXIT_INSN(),
-		/* B */
-		BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
-		BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
-		BPF_EXIT_INSN(),
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		/* stack_main=32, stack_A=64, stack_B=256
-		 * and max(main+A, main+A+B) < 512
-		 */
-		.result = ACCEPT,
-	},
test3", - .insns = { - /* main */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */ - BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1), - BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* A */ - BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - /* B */ - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */ - BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - /* stack_main=64, stack_A=224, stack_B=256 - * and max(main+A, main+A+B) > 512 - */ - .errstr = "combined stack", - .result = REJECT, - }, - { - "calls: stack depth check using three frames. test4", - /* void main(void) { - * func1(0); - * func1(1); - * func2(1); - * } - * void func1(int alloc_or_recurse) { - * if (alloc_or_recurse) { - * frame_pointer[-300] = 1; - * } else { - * func2(alloc_or_recurse); - * } - * } - * void func2(int alloc_or_recurse) { - * if (alloc_or_recurse) { - * frame_pointer[-300] = 1; - * } - * } - */ - .insns = { - /* main */ - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* A */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), - BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ - BPF_EXIT_INSN(), - /* B */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = REJECT, - .errstr = "combined stack", - }, - { - "calls: stack depth check using three frames. 
test5", - .insns = { - /* main */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ - BPF_EXIT_INSN(), - /* A */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ - BPF_EXIT_INSN(), - /* B */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ - BPF_EXIT_INSN(), - /* C */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ - BPF_EXIT_INSN(), - /* D */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ - BPF_EXIT_INSN(), - /* E */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ - BPF_EXIT_INSN(), - /* F */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ - BPF_EXIT_INSN(), - /* G */ - BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ - BPF_EXIT_INSN(), - /* H */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .errstr = "call stack", - .result = REJECT, - }, - { - "calls: spill into caller stack frame", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .errstr = "cannot spill", - .result = REJECT, - }, - { - "calls: write into caller stack frame", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, - .retval = 42, - }, - { - "calls: write into callee stack frame", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_XDP, - .errstr = "cannot return stack pointer", - .result = REJECT, - }, - { - "calls: two calls with stack write and void return", - .insns = { - /* main prog */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - - /* subprog 2 */ - /* write into stack frame of main prog */ - BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), - BPF_EXIT_INSN(), /* void return */ - }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, - }, - { - "calls: ambiguous return value", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "allowed for root only", - .result_unpriv = REJECT, - .errstr = "R0 !read_ok", - .result = 
-	},
-	{
-		"calls: two calls that return map_value",
-		.insns = {
-		/* main prog */
-		/* pass fp-16, fp-8 into a function */
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
-
-		/* fetch map_value_ptr from the stack of this function */
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-		/* write into map value */
-		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-		/* fetch secound map_value_ptr from the stack */
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16),
-		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-		/* write into map value */
-		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-
-		/* subprog 1 */
-		/* call 3rd function twice */
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
-		/* first time with fp-8 */
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
-		/* second time with fp-16 */
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
-		BPF_EXIT_INSN(),
-
-		/* subprog 2 */
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		/* lookup from map */
-		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-		BPF_LD_MAP_FD(BPF_REG_1, 0),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			     BPF_FUNC_map_lookup_elem),
-		/* write map_value_ptr into stack frame of main prog */
-		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(), /* return 0 */
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
-		.fixup_map_hash_8b = { 23 },
-		.result = ACCEPT,
-	},
-	{
-		"calls: two calls that return map_value with bool condition",
-		.insns = {
-		/* main prog */
-		/* pass fp-16, fp-8 into a function */
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(),
-
-		/* subprog 1 */
-		/* call 3rd function twice */
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
-		/* first time with fp-8 */
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
-		/* fetch map_value_ptr from the stack of this function */
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
-		/* write into map value */
-		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-		BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
-		/* second time with fp-16 */
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
-		/* fetch secound map_value_ptr from the stack */
-		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
-		/* write into map value */
-		BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
-		BPF_EXIT_INSN(),
-
-		/* subprog 2 */
-		BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-		/* lookup from map */
-		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-		BPF_LD_MAP_FD(BPF_REG_1, 0),
-		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			     BPF_FUNC_map_lookup_elem),
-		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
-		BPF_MOV64_IMM(BPF_REG_0, 0),
-		BPF_EXIT_INSN(), /* return 0 */
-		/* write map_value_ptr into stack frame of main prog */
-		BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
-		BPF_MOV64_IMM(BPF_REG_0, 1),
-		BPF_EXIT_INSN(), /* return 1 */
-		},
-		.prog_type = BPF_PROG_TYPE_XDP,
.fixup_map_hash_8b = { 23 }, - .result = ACCEPT, - }, - { - "calls: two calls that return map_value with incorrect bool check", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - /* call 3rd function twice */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* first time with fp-8 */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - /* second time with fp-16 */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - /* fetch second map_value_ptr from the stack */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - /* lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), /* return 0 */ - /* write map_value_ptr into stack frame of main prog */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), /* return 1 */ - }, - .prog_type = BPF_PROG_TYPE_XDP, - .fixup_map_hash_8b = { 23 }, - .result = REJECT, - .errstr = "invalid read from stack off -16+0 size 8", - }, - { - "calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test1", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* 1st lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_8, 1), - - /* 2nd lookup from map */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_9, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-16 */ - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_9, 1), - - /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ - BPF_EXIT_INSN(), - - /* subprog 2 */ - /* if arg2 == 1 do *arg1 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - - /* if arg4 == 1 do *arg3 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_8b = { 12, 22 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=8 off=2 size=8", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test2", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* 1st lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_8, 1), - - /* 2nd lookup from map */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_9, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-16 */ - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_9, 1), - - /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ - BPF_EXIT_INSN(), - - /* subprog 2 */ - /* if arg2 == 1 do *arg1 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - - /* if arg4 == 1 do *arg3 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_8b = { 12, 22 }, - .result = ACCEPT, - }, - { - "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. 
test3", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* 1st lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_8, 1), - - /* 2nd lookup from map */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_9, 0), // 26 - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - /* write map_value_ptr into stack frame of main prog at fp-16 */ - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_9, 1), - - /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34 - BPF_JMP_IMM(BPF_JA, 0, 0, -30), - - /* subprog 2 */ - /* if arg2 == 1 do *arg1 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - - /* if arg4 == 1 do *arg3 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -8), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_8b = { 12, 22 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=8 off=2 size=8", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: two calls that receive map_value_ptr_or_null via arg. 
test1", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* 1st lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_8, 1), - - /* 2nd lookup from map */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_9, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_9, 1), - - /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - - /* subprog 2 */ - /* if arg2 == 1 do *arg1 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - - /* if arg4 == 1 do *arg3 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_8b = { 12, 22 }, - .result = ACCEPT, - }, - { - "calls: two calls that receive map_value_ptr_or_null via arg. 
test2", - .insns = { - /* main prog */ - /* pass fp-16, fp-8 into a function */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - /* 1st lookup from map */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_8, 1), - - /* 2nd lookup from map */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_9, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_9, 1), - - /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - - /* subprog 2 */ - /* if arg2 == 1 do *arg1 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - - /* if arg4 == 0 do *arg3 = 0 */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), - /* fetch map_value_ptr from the stack of this function */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - /* write into map value */ - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_8b = { 12, 22 }, - .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", - }, - { - "calls: pkt_ptr spill into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - /* spill unchecked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - /* now the pkt range is verified, read pkt_ptr from stack */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = POINTER_VALUE, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 2", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - /* 
Marking is still kept, but not in all cases safe. */ - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - /* spill unchecked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - /* now the pkt range is verified, read pkt_ptr from stack */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid access to packet", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 3", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* Marking is still kept and safe here. */ - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - /* spill unchecked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* now the pkt range is verified, read pkt_ptr from stack */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 4", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* Check marking propagated. 
*/ - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - /* spill unchecked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 5", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - /* spill checked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "same insn cannot be used with different", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - /* spill checked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R4 invalid mem access", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - /* spill checked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R4 invalid mem access", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 8", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - /* spill checked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: pkt_ptr spill into caller stack 9", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_5, 0), - /* spill unchecked pkt_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_5, 1), - /* don't 
read back pkt_ptr from stack here */ - /* write 4 bytes into packet */ - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid access to packet", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "calls: caller stack init to zero or map_value_or_null", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - /* fetch map_value_or_null or const_zero from stack */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - /* store into map_value */ - BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - /* if (ctx == 0) return; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), - /* else bpf_map_lookup() and *(fp - 8) = r0 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 13 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "calls: stack init to zero and pruning", - .insns = { - /* first make allocated_stack 16 bytes */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - /* now fork the execution such that the false branch - * of JGT insn will be verified second and it skips zero - * init of fp-8 stack slot. If stack liveness marking - * is missing live_read marks from call map_lookup - * processing then pruning will incorrectly assume - * that fp-8 stack slot was unused in the fall-through - * branch and will accept the program incorrectly - */ - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 6 }, - .errstr = "invalid indirect read from stack off -8+0 size 8", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - }, - { - "calls: two calls returning different map pointers for lookup (hash, array)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_48b = { 13 }, - .fixup_map_array_48b = { 16 }, - .result = ACCEPT, - .retval = 1, - }, - { - "calls: two calls returning different map pointers for lookup (hash, map in map)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 
0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_in_map = { 16 }, - .fixup_map_array_48b = { 13 }, - .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", - }, - { - "cond: two branches returning different map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 2 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 42, - }, - { - "cond: two branches returning same map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog2 = { 2, 5 }, - .result_unpriv = ACCEPT, - .result = ACCEPT, - .retval = 42, - }, - { - "search pruning: all branches should be verified (nop operation)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_A(1), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R6 invalid mem access 'inv'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "search pruning: all branches should be verified (invalid stack access)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_JMP_A(1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), - 
BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "invalid read from stack off -16+0 size 8", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "jit: lsh, rsh, arsh by 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_IMM(BPF_REG_1, 0xff), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jit: mov32 for ldimm64, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32), - BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL), - BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jit: mov32 for ldimm64, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL), - BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL), - BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jit: various mul tests", - .insns = { - BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), - BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), - BPF_LD_IMM64(BPF_REG_1, 0xefefefULL), - BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), - BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), - BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), - BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), - BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "xadd/w check unaligned stack", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "xadd/w check unaligned map", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3), - 
BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "misaligned value access off", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - }, - { - "xadd/w check unaligned pkt", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 99), - BPF_JMP_IMM(BPF_JA, 0, 0, 6), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0), - BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1), - BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "BPF_XADD stores into R2 pkt is not allowed", - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "xadd/w check whether src/dst got mangled, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), - BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 3, - }, - { - "xadd/w check whether src/dst got mangled, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), - BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), - BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), - BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 3, - }, - { - "bpf_get_stack return R0 within range", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)), - BPF_MOV64_IMM(BPF_REG_4, 256), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), - BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_5, sizeof(struct 
test_val)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_9), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - }, - { - "ld_abs: invalid op 1", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_DW, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "unknown opcode", - }, - { - "ld_abs: invalid op 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 256), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LD_IND(BPF_DW, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "unknown opcode", - }, - { - "ld_abs: nmap reduced", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26), - BPF_MOV32_IMM(BPF_REG_0, 18), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64), - BPF_LD_IND(BPF_W, BPF_REG_7, 14), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60), - BPF_MOV32_IMM(BPF_REG_0, 280971478), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60), - BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15), - BPF_LD_ABS(BPF_H, 12), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13), - BPF_MOV32_IMM(BPF_REG_0, 22), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56), - BPF_LD_IND(BPF_H, BPF_REG_7, 14), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52), - BPF_MOV32_IMM(BPF_REG_0, 17366), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52), - BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV32_IMM(BPF_REG_0, 256), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .data = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 256, - }, - { - "ld_abs: div + abs, test 1", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_B, 3), - BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2), - BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0), - BPF_LD_ABS(BPF_B, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), - BPF_LD_IND(BPF_B, BPF_REG_8, -70), - BPF_EXIT_INSN(), - }, - .data = { - 10, 20, 30, 40, 50, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 10, - }, - { - "ld_abs: div + abs, test 2", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_B, 3), - BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2), - BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0), - BPF_LD_ABS(BPF_B, 128), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), - BPF_LD_IND(BPF_B, BPF_REG_8, -70), - BPF_EXIT_INSN(), - }, - .data = { - 10, 20, 30, 40, 50, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "ld_abs: div + abs, 
test 3", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), - BPF_LD_ABS(BPF_B, 3), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .data = { - 10, 20, 30, 40, 50, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "ld_abs: div + abs, test 4", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), - BPF_LD_ABS(BPF_B, 256), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .data = { - 10, 20, 30, 40, 50, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, - }, - { - "ld_abs: vlan + abs, test 1", - .insns = { }, - .data = { - 0x34, - }, - .fill_helper = bpf_fill_ld_abs_vlan_push_pop, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0xbef, - }, - { - "ld_abs: vlan + abs, test 2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_vlan_push), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .data = { - 0x34, - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 42, - }, - { - "ld_abs: jump around ld_abs", - .insns = { }, - .data = { - 10, 11, - }, - .fill_helper = bpf_fill_jump_around_ld_abs, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 10, - }, - { - "ld_dw: xor semi-random 64 bit imms, test 1", - .insns = { }, - .data = { }, - .fill_helper = bpf_fill_rand_ld_dw, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 4090, - }, - { - "ld_dw: xor semi-random 64 bit imms, test 2", - .insns = { }, - .data = { }, - .fill_helper = bpf_fill_rand_ld_dw, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2047, - }, - { - "ld_dw: xor semi-random 64 bit imms, test 3", - .insns = { }, - .data = { }, - .fill_helper = bpf_fill_rand_ld_dw, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 511, - }, - { - "ld_dw: xor semi-random 64 bit imms, test 4", - .insns = { }, - .data = { }, - .fill_helper = bpf_fill_rand_ld_dw, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 5, - }, - { - "pass unmodified ctx pointer to helper", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: leak potential reference", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking: leak potential reference on stack", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking: leak potential 
reference on stack 2", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking: zero potential reference", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking: copy and zero potential references", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking: release reference without check", - .insns = { - BPF_SK_LOOKUP, - /* reference in r0 may be NULL */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=sock_or_null expected=sock", - .result = REJECT, - }, - { - "reference tracking: release reference", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: release reference 2", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: release reference twice", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", - .result = REJECT, - }, - { - "reference tracking: release reference twice inside branch", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */ - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", - .result = REJECT, - }, - { - "reference tracking: alloc, check, free in one subbranch", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP, - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */ - /* Leak reference in R0 */ - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? 
*/ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "reference tracking: alloc, check, free in both subbranches", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP, - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, - }, - { - "reference tracking in call: free reference in subprog", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "pass modified ctx pointer to helper, 1", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "dereference of modified ctx ptr", - }, - { - "pass modified ctx pointer to helper, 2", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .result = REJECT, - .errstr_unpriv = "dereference of modified ctx ptr", - .errstr = "dereference of modified ctx ptr", - }, - { - "pass modified ctx pointer to helper, 3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "variable ctx access var_off=(0x0; 0x4)", - }, - { - "mov64 src == dst", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_2), - // Check bounds are OK - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "mov64 src != dst", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - // Check bounds are OK - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "allocated_stack", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8), - BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9), - BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = ACCEPT, - .insn_processed = 15, - }, - { - "masking, test out of bounds 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 5), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 3", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 4", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 5", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 6", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 7", - .insns = { 
- BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 9", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 10", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 11", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test out of bounds 12", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test in bounds 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 4), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 4, - }, - { - "masking, test in bounds 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - 
}, - { - "masking, test in bounds 3", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xfffffffe, - }, - { - "masking, test in bounds 4", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xabcde), - BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xabcde, - }, - { - "masking, test in bounds 5", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "masking, test in bounds 6", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 46), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 46, - }, - { - "masking, test in bounds 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -46), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 46, - }, - { - "masking, test in bounds 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -47), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, - }, - { - "reference tracking in call: free reference in subprog and outside", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", - .result = REJECT, - }, - { - "reference tracking in call: alloc & leak reference in subprog", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - 
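		/* r4 is pointed at the caller's stack (fp - 8) and passed
		 * to the subprog, which looks up a socket and spills the
		 * unchecked pointer there. No path releases the acquired
		 * reference, so the verifier must report
		 * "Unreleased reference".
		 */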
BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_4), - BPF_SK_LOOKUP, - /* spill unchecked sk_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking in call: alloc in subprog, release outside", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_SK_LOOKUP, - BPF_EXIT_INSN(), /* return sk */ - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = POINTER_VALUE, - .result = ACCEPT, - }, - { - "reference tracking in call: sk_ptr leak into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP, - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - }, - { - "reference tracking in call: sk_ptr spill into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* now the sk_ptr is verified, free the reference */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP, - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: allow LD_ABS", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: forbid LD_ABS while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP, - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - 
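		/* if (sk) bpf_sk_release(sk) -- but by now it is too late:
		 * any of the LD_ABS loads above may have terminated the
		 * program implicitly (return 0 on a failed packet access)
		 * while the reference was still held, hence
		 * "BPF_LD_[ABS|IND] cannot be mixed with socket references".
		 */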
BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, - }, - { - "reference tracking: allow LD_IND", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, - }, - { - "reference tracking: forbid LD_IND while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, - }, - { - "reference tracking: check reference or tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_SK_LOOKUP, - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: release reference then tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_SK_LOOKUP, - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 18 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: leak possible reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP, - /* bpf_tail_call() */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 16 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, - }, - { - "reference tracking: leak checked reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if (!sk) goto end */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - /* bpf_tail_call() */ - 
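		/* A successful tail call never returns to this program, so
		 * the bpf_sk_release() below would be skipped and the
		 * checked reference in r6 leaked; the verifier rejects this
		 * with "tail_call would lead to reference leak".
		 */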
BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, - }, - { - "reference tracking: mangle and release sock_or_null", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 pointer arithmetic on sock_or_null prohibited", - .result = REJECT, - }, - { - "reference tracking: mangle and release sock", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 pointer arithmetic on sock prohibited", - .result = REJECT, - }, - { - "reference tracking: access member", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "reference tracking: write to member", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LD_IMM64(BPF_REG_2, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, - offsetof(struct bpf_sock, mark)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "cannot write into socket", - .result = REJECT, - }, - { - "reference tracking: invalid 64-bit access of member", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid bpf_sock access off=0 size=8", - .result = REJECT, - }, - { - "reference tracking: access after release", - .insns = { - BPF_SK_LOOKUP, - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "!read_ok", - .result = REJECT, - }, - { - "reference tracking: direct access for lookup", - .insns = { - /* Check that the packet is at least 64B long */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - /* sk = sk_lookup_tcp(ctx, skb->data, ...) 
*/ - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "calls: ctx read at start of subprog", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - }, - { - "check wire_len is not readable by sockets", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, wire_len)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - }, - { - "check wire_len is readable by tc classifier", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, wire_len)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - }, - { - "check wire_len is not writable by tc classifier", - .insns = { - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, wire_len)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid bpf_context access", - .errstr_unpriv = "R1 leaks addr", - .result = REJECT, - }, - { - "calls: cross frame pruning", - .insns = { - /* r8 = !!random(); - * call pruner() - * if (r8) - * do something bad; - */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_8, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .errstr = "!read_ok", - .result = REJECT, - }, - { - "jset: functional", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - - /* reg, bit 63 or bit 0 set, taken */ - BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), - BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - - /* reg, bit 62, not taken */ - BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000), - BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - - /* imm, any bit set, taken */ - BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - - /* imm, bit 31 set, taken */ - BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1), - BPF_EXIT_INSN(), - - /* all good - return r0 == 2 */ - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 7, - .retvals = { - { .retval = 2, - .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), } - 
}, - { .retval = 2, - .data64 = { (1ULL << 63) | (1U << 31), } - }, - { .retval = 2, - .data64 = { (1ULL << 31) | (1U << 0), } - }, - { .retval = 2, - .data64 = { (__u32)-1, } - }, - { .retval = 2, - .data64 = { ~0x4000000000000000ULL, } - }, - { .retval = 0, - .data64 = { 0, } - }, - { .retval = 0, - .data64 = { ~0ULL, } - }, - }, - }, - { - "jset: sign-extend", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - - BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1), - BPF_EXIT_INSN(), - - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2, - .data = { 1, 0, 0, 0, 0, 0, 0, 1, }, - }, - { - "jset: known const compare", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .retval_unpriv = 1, - .result_unpriv = ACCEPT, - .retval = 1, - .result = ACCEPT, - }, - { - "jset: known const compare bad", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "!read_ok", - .result_unpriv = REJECT, - .errstr = "!read_ok", - .result = REJECT, - }, - { - "jset: unknown const compare taken", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "!read_ok", - .result_unpriv = REJECT, - .errstr = "!read_ok", - .result = REJECT, - }, - { - "jset: unknown const compare not taken", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .errstr_unpriv = "!read_ok", - .result_unpriv = REJECT, - .errstr = "!read_ok", - .result = REJECT, - }, - { - "jset: half-known const compare", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .result_unpriv = ACCEPT, - .result = ACCEPT, - }, - { - "jset: range", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff), - BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3), - BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, - .result_unpriv = ACCEPT, - .result = ACCEPT, - }, - { - "dead code: start", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: mid 
1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: mid 2", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4), - BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 1, - }, - { - "dead code: end 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), - BPF_EXIT_INSN(), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: end 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 12), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: end 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 12), - BPF_JMP_IMM(BPF_JA, 0, 0, -5), - }, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: tail of main + func", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 12), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: tail of main + two functions", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 12), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: function in the middle and mid of another func", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 12), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 7), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 7, - }, - { - "dead code: middle of main before call", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 2, - }, - { - "dead code: start of a function", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "function calls to other bpf functions are allowed for root only", - .result_unpriv = REJECT, - 
.result = ACCEPT, - .retval = 2, - }, - { - "jset32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - /* reg, high bits shouldn't be tested */ - BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - - BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { 1ULL << 63, } - }, - { .retval = 2, - .data64 = { 1, } - }, - { .retval = 2, - .data64 = { 1ULL << 63 | 1, } - }, - }, - }, - { - "jset32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000), - BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - - BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), - BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { 1ULL << 63, } - }, - { .retval = 2, - .data64 = { 1, } - }, - { .retval = 2, - .data64 = { 1ULL << 63 | 1, } - }, - }, - }, - { - "jset32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1), - BPF_EXIT_INSN(), - BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "jeq32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 2, - .retvals = { - { .retval = 0, - .data64 = { -2, } - }, - { .retval = 2, - .data64 = { -1, } - }, - }, - }, - { - "jeq32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001), - BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { 2, } - }, - { .retval = 2, - .data64 = { 1, } - }, - { .retval = 2, - .data64 = { 1ULL << 63 | 1, } - }, - }, - }, - { - "jeq32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1), - BPF_EXIT_INSN(), - BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "jne32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 2, - .retvals = { - { .retval = 2, - .data64 = { 1, } - }, - { .retval = 0, - .data64 = { -1, } - }, - }, - }, - { - "jne32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), - BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { 1, } - }, - { .retval = 2, - .data64 = { 2, } - }, - { .retval = 2, - .data64 = { 1ULL << 63 | 2, } - }, - }, - }, - { - "jne32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - }, - { - "jge32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { UINT_MAX, } - }, - { .retval = 2, - .data64 = { UINT_MAX - 1, } - }, - { .retval = 0, - .data64 = { 0, } - }, - }, - }, - { - "jge32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { UINT_MAX, } - }, - { .retval = 0, - .data64 = { INT_MAX, } - }, - { .retval = 0, - .data64 = { (UINT_MAX - 1) | 2ULL << 32, } - }, - }, - }, - { - "jge32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jgt32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { UINT_MAX, } - }, - { .retval = 0, - .data64 = { UINT_MAX - 1, } - }, - { .retval = 0, - .data64 = { 0, } - }, - }, - }, - { - "jgt32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { UINT_MAX, } - }, - { .retval = 0, - .data64 = { UINT_MAX - 1, } - }, - { .retval = 0, - .data64 = { (UINT_MAX - 1) | 2ULL << 32, } - }, - }, - }, - { - "jgt32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jle32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1), - BPF_EXIT_INSN(), - 
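		/* The jmp32 tests here all rely on the same property: a
		 * JMP32 conditional compares only the low 32 bits of its
		 * operands, so the upper half of the .data64 inputs (and of
		 * the BPF_LD_IMM64 constants in the BPF_X variants) must
		 * not influence the branch.
		 */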
BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { INT_MAX - 1, } - }, - { .retval = 0, - .data64 = { UINT_MAX, } - }, - { .retval = 2, - .data64 = { INT_MAX, } - }, - }, - }, - { - "jle32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { INT_MAX | 1ULL << 32, } - }, - { .retval = 2, - .data64 = { INT_MAX - 2, } - }, - { .retval = 0, - .data64 = { UINT_MAX, } - }, - }, - }, - { - "jle32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jlt32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { INT_MAX, } - }, - { .retval = 0, - .data64 = { UINT_MAX, } - }, - { .retval = 2, - .data64 = { INT_MAX - 1, } - }, - }, - }, - { - "jlt32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { INT_MAX | 1ULL << 32, } - }, - { .retval = 0, - .data64 = { UINT_MAX, } - }, - { .retval = 2, - .data64 = { (INT_MAX - 1) | 3ULL << 32, } - }, - }, - }, - { - "jlt32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jsge32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { 0, } - }, - { .retval = 2, - .data64 = { -1, } - }, - { .retval = 0, - .data64 = { -2, } - }, - }, - }, - { - "jsge32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { -1, } - 
}, - { .retval = 2, - .data64 = { 0x7fffffff | 1ULL << 32, } - }, - { .retval = 0, - .data64 = { -2, } - }, - }, - }, - { - "jsge32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jsgt32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { (__u32)-2, } - }, - { .retval = 0, - .data64 = { -1, } - }, - { .retval = 2, - .data64 = { 1, } - }, - }, - }, - { - "jsgt32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 0, - .data64 = { 0x7ffffffe, } - }, - { .retval = 0, - .data64 = { 0x1ffffffffULL, } - }, - { .retval = 2, - .data64 = { 0x7fffffff, } - }, - }, - }, - { - "jsgt32: min/max deduction", - .insns = { - BPF_RAND_SEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32), - BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jsle32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { (__u32)-2, } - }, - { .retval = 2, - .data64 = { -1, } - }, - { .retval = 0, - .data64 = { 1, } - }, - }, - }, - { - "jsle32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { 0x7ffffffe, } - }, - { .retval = 2, - .data64 = { (__u32)-1, } - }, - { .retval = 0, - .data64 = { 0x7fffffff | 2ULL << 32, } - }, - }, - }, - { - "jsle32: min/max deduction", - .insns = { - BPF_RAND_UEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), - BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, - { - "jslt32: BPF_K", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { (__u32)-2, } - }, - { .retval = 0, - .data64 = { -1, } - }, - { .retval = 0, - .data64 = { 1, } - }, - }, - }, - { - "jslt32: BPF_X", - .insns = { - BPF_DIRECT_PKT_R2, - BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), - BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .runs = 3, - .retvals = { - { .retval = 2, - .data64 = { 0x7ffffffe, } - }, - { .retval = 2, - .data64 = { 0xffffffff, } - }, - { .retval = 0, - .data64 = { 0x7fffffff | 2ULL << 32, } - }, - }, - }, - { - "jslt32: min/max deduction", - .insns = { - BPF_RAND_SEXT_R7, - BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), - BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32), - BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), - BPF_EXIT_INSN(), - BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 2, - }, }; static int probe_filter_length(const struct bpf_insn *fp) diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c index a18686a4bd75..ed91a7b9a456 100644 --- a/tools/testing/selftests/bpf/verifier/basic_instr.c +++ b/tools/testing/selftests/bpf/verifier/basic_instr.c @@ -90,3 +90,45 @@ }, .result = ACCEPT, }, +{ + "invalid 64-bit BPF_END", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 0), + { + .code = BPF_ALU64 | BPF_END | BPF_TO_LE, + .dst_reg = BPF_REG_0, + .src_reg = 0, + .off = 0, + .imm = 32, + }, + BPF_EXIT_INSN(), + }, + .errstr = "unknown opcode d7", + .result = REJECT, +}, +{ + "mov64 src == dst", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_2), + // Check bounds are OK + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "mov64 src != dst", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), + // Check bounds are OK + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c new file mode 100644 index 000000000000..1fd07a4f27ac --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c @@ -0,0 +1,124 @@ +{ + "check deducing bounds from const, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", +}, +{ + "check deducing bounds from const, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 1, +}, +{ + "check deducing bounds from const, 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract 
pointer from scalar", +}, +{ + "check deducing bounds from const, 4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "check deducing bounds from const, 5", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", +}, +{ + "check deducing bounds from const, 6", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", +}, +{ + "check deducing bounds from const, 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, ~0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "dereference of modified ctx ptr", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "check deducing bounds from const, 8", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, ~0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "dereference of modified ctx ptr", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "check deducing bounds from const, 9", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 tried to subtract pointer from scalar", +}, +{ + "check deducing bounds from const, 10", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), + /* Marks reg as unknown. 
*/ + BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "math between ctx pointer and register with unbounded min value is not allowed", +}, diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c new file mode 100644 index 000000000000..f24d50f09dbe --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c @@ -0,0 +1,44 @@ +{ + "bpf_get_stack return R0 within range", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)), + BPF_MOV64_IMM(BPF_REG_4, 256), + BPF_EMIT_CALL(BPF_FUNC_get_stack), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), + BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16), + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_9), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_EMIT_CALL(BPF_FUNC_get_stack), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_48b = { 4 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c new file mode 100644 index 000000000000..4004891afa9c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -0,0 +1,1942 @@ +{ + "calls: basic sanity", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, +{ + "calls: not on unpriviledged", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 1, +}, +{ + "calls: div by 0 in subprog", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(BPF_REG_2, 0), + BPF_MOV32_IMM(BPF_REG_3, 
1), + BPF_ALU32_REG(BPF_DIV, BPF_REG_3, BPF_REG_2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "calls: multiple ret types in subprog 1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_MOV32_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "R0 invalid mem access 'inv'", +}, +{ + "calls: multiple ret types in subprog 2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 9), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, + offsetof(struct __sk_buff, data)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 16 }, + .result = REJECT, + .errstr = "R0 min value is outside of the array range", +}, +{ + "calls: overlapping caller/callee", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "last insn is not an exit or jmp", + .result = REJECT, +}, +{ + "calls: wrong recursive calls", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 4), + BPF_JMP_IMM(BPF_JA, 0, 0, 4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "calls: wrong src reg", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "BPF_CALL uses reserved fields", + .result = REJECT, +}, +{ + "calls: wrong off value", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "BPF_CALL uses reserved fields", + .result = REJECT, +}, +{ + "calls: jump back loop", + .insns = { + BPF_RAW_INSN(BPF_JMP | 
BPF_CALL, 0, 1, 0, -1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge from insn 0 to 0", + .result = REJECT, +}, +{ + "calls: conditional call", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "calls: conditional call 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, +{ + "calls: conditional call 3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_JMP_IMM(BPF_JA, 0, 0, 4), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, -6), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_JMP_IMM(BPF_JA, 0, 0, -6), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge from insn", + .result = REJECT, +}, +{ + "calls: conditional call 4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, -5), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, +{ + "calls: conditional call 5", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, -6), + BPF_MOV64_IMM(BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge from insn", + .result = REJECT, +}, +{ + "calls: conditional call 6", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge from insn", + .result = REJECT, +}, +{ + "calls: using r0 returned by callee", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .result = ACCEPT, +}, +{ + "calls: using uninit r0 from callee", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "!read_ok", + .result = REJECT, +}, +{ + "calls: callee is using r1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_ACT, + 
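	/* The callee receives the caller's ctx in r1, so reading
	 * skb->len there returns the test packet length.
	 */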
.result = ACCEPT, + .retval = TEST_DATA_LEN, +}, +{ + "calls: callee using args1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = POINTER_VALUE, +}, +{ + "calls: callee using wrong args2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "R2 !read_ok", + .result = REJECT, +}, +{ + "calls: callee using two args", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, + offsetof(struct __sk_buff, len)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6, + offsetof(struct __sk_buff, len)), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = TEST_DATA_LEN + TEST_DATA_LEN - ETH_HLEN - ETH_HLEN, +}, +{ + "calls: callee changing pkt pointers", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + /* clear_all_pkt_pointers() has to walk all frames + * to make sure that pkt pointers in the caller + * are cleared when callee is calling a helper that + * adjusts packet size + */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_head), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R6 invalid mem access 'inv'", + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: two calls with args", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = TEST_DATA_LEN + TEST_DATA_LEN, +}, +{ + "calls: calls with stack arith", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 42, +}, +{ + "calls: calls with misaligned stack access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + 
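+	/* subprog 1: r2 arrives as fp-63; the two further adds below
+	 * take it to fp-187, which is not 8-byte aligned for the
+	 * final BPF_DW store
+	 */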
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, + .errstr = "misaligned stack access", + .result = REJECT, +}, +{ + "calls: calls control flow, jump test", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 43), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 43, +}, +{ + "calls: calls control flow, jump test 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 43), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "jump out of range from insn 1 to 4", + .result = REJECT, +}, +{ + "calls: two calls with bad jump", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "jump out of range from insn 11 to 9", + .result = REJECT, +}, +{ + "calls: recursive call. test1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge", + .result = REJECT, +}, +{ + "calls: recursive call. test2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "back-edge", + .result = REJECT, +}, +{ + "calls: unreachable code", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "unreachable insn 6", + .result = REJECT, +}, +{ + "calls: invalid call", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "invalid destination", + .result = REJECT, +}, +{ + "calls: invalid call 2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "invalid destination", + .result = REJECT, +}, +{ + "calls: jumping across function bodies. 
test1", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "calls: jumping across function bodies. test2", + .insns = { + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "jump out of range", + .result = REJECT, +}, +{ + "calls: call without exit", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "not an exit", + .result = REJECT, +}, +{ + "calls: call into middle of ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "last insn", + .result = REJECT, +}, +{ + "calls: call into middle of other call", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "last insn", + .result = REJECT, +}, +{ + "calls: ld_abs with changing ctx data in callee", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_vlan_push), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed", + .result = REJECT, +}, +{ + "calls: two calls with bad fallthrough", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, len)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .errstr = "not an exit", + .result = REJECT, +}, +{ + "calls: two calls with stack read", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, 
BPF_REG_7), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = ACCEPT, +}, +{ + "calls: two calls with stack write", + .insns = { + /* main prog */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), + /* write into stack frame of main prog */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* read from stack frame of main prog */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = ACCEPT, +}, +{ + "calls: stack overflow using two frames (pre-call access)", + .insns = { + /* prog 1 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* prog 2 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "combined stack size", + .result = REJECT, +}, +{ + "calls: stack overflow using two frames (post-call access)", + .insns = { + /* prog 1 */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + + /* prog 2 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "combined stack size", + .result = REJECT, +}, +{ + "calls: stack depth check using three frames. test1", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=32, stack_A=256, stack_B=64 + * and max(main+A, main+A+B) < 512 + */ + .result = ACCEPT, +}, +{ + "calls: stack depth check using three frames. test2", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=32, stack_A=64, stack_B=256 + * and max(main+A, main+A+B) < 512 + */ + .result = ACCEPT, +}, +{ + "calls: stack depth check using three frames. 
test3", + .insns = { + /* main */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */ + BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1), + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + /* B */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=64, stack_A=224, stack_B=256 + * and max(main+A, main+A+B) > 512 + */ + .errstr = "combined stack", + .result = REJECT, +}, +{ + "calls: stack depth check using three frames. test4", + /* void main(void) { + * func1(0); + * func1(1); + * func2(1); + * } + * void func1(int alloc_or_recurse) { + * if (alloc_or_recurse) { + * frame_pointer[-300] = 1; + * } else { + * func2(alloc_or_recurse); + * } + * } + * void func2(int alloc_or_recurse) { + * if (alloc_or_recurse) { + * frame_pointer[-300] = 1; + * } + * } + */ + .insns = { + /* main */ + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ + BPF_EXIT_INSN(), + /* B */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = REJECT, + .errstr = "combined stack", +}, +{ + "calls: stack depth check using three frames. 
test5", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ + BPF_EXIT_INSN(), + /* A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ + BPF_EXIT_INSN(), + /* C */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ + BPF_EXIT_INSN(), + /* D */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ + BPF_EXIT_INSN(), + /* E */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ + BPF_EXIT_INSN(), + /* F */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ + BPF_EXIT_INSN(), + /* G */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ + BPF_EXIT_INSN(), + /* H */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "call stack", + .result = REJECT, +}, +{ + "calls: spill into caller stack frame", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "cannot spill", + .result = REJECT, +}, +{ + "calls: write into caller stack frame", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = ACCEPT, + .retval = 42, +}, +{ + "calls: write into callee stack frame", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "cannot return stack pointer", + .result = REJECT, +}, +{ + "calls: two calls with stack write and void return", + .insns = { + /* main prog */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* write into stack frame of main prog */ + BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), + BPF_EXIT_INSN(), /* void return */ + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = ACCEPT, +}, +{ + "calls: ambiguous return value", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "allowed for root only", + .result_unpriv = REJECT, + .errstr = "R0 !read_ok", + .result = REJECT, +}, +{ 
+ "calls: two calls that return map_value", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + /* fetch secound map_value_ptr from the stack */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + /* call 3rd function twice */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* first time with fp-8 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + /* second time with fp-16 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + /* lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + /* write map_value_ptr into stack frame of main prog */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), /* return 0 */ + }, + .prog_type = BPF_PROG_TYPE_XDP, + .fixup_map_hash_8b = { 23 }, + .result = ACCEPT, +}, +{ + "calls: two calls that return map_value with bool condition", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + /* call 3rd function twice */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* first time with fp-8 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + /* second time with fp-16 */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), + /* fetch secound map_value_ptr from the stack */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + /* lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), /* return 0 */ + /* write map_value_ptr into stack frame of main prog */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), /* return 1 */ + }, + .prog_type = BPF_PROG_TYPE_XDP, + .fixup_map_hash_8b = { 23 }, + 
+	.result = ACCEPT,
+},
+{
+	"calls: two calls that return map_value with incorrect bool check",
+	.insns = {
+	/* main prog */
+	/* pass fp-16, fp-8 into a function */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+
+	/* subprog 1 */
+	/* call 3rd function twice */
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+	/* first time with fp-8 */
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2),
+	/* fetch map_value_ptr from the stack of this function */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+	/* write into map value */
+	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+	/* second time with fp-16 */
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	/* fetch second map_value_ptr from the stack */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+	/* write into map value */
+	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+	BPF_EXIT_INSN(),
+
+	/* subprog 2 */
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	/* lookup from map */
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(), /* return 0 */
+	/* write map_value_ptr into stack frame of main prog */
+	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(), /* return 1 */
+	},
+	.prog_type = BPF_PROG_TYPE_XDP,
+	.fixup_map_hash_8b = { 23 },
+	.result = REJECT,
+	.errstr = "invalid read from stack off -16+0 size 8",
+},
+{
+	"calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test1", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* 1st lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-8 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_8, 1), + + /* 2nd lookup from map */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-16 */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_9, 1), + + /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* if arg2 == 1 do *arg1 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + + /* if arg4 == 1 do *arg3 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 12, 22 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=8 off=2 size=8", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: two calls that receive map_value via arg=ptr_stack_of_caller. 
test2", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* 1st lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-8 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_8, 1), + + /* 2nd lookup from map */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-16 */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_9, 1), + + /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* if arg2 == 1 do *arg1 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + + /* if arg4 == 1 do *arg3 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 12, 22 }, + .result = ACCEPT, +}, +{ + "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. 
test3", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* 1st lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-8 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_8, 1), + + /* 2nd lookup from map */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_9, 0), // 26 + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + /* write map_value_ptr into stack frame of main prog at fp-16 */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_9, 1), + + /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34 + BPF_JMP_IMM(BPF_JA, 0, 0, -30), + + /* subprog 2 */ + /* if arg2 == 1 do *arg1 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + + /* if arg4 == 1 do *arg3 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -8), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 12, 22 }, + .result = REJECT, + .errstr = "invalid access to map value, value_size=8 off=2 size=8", + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: two calls that receive map_value_ptr_or_null via arg. 
test1", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* 1st lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_8, 1), + + /* 2nd lookup from map */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_9, 1), + + /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* if arg2 == 1 do *arg1 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + + /* if arg4 == 1 do *arg3 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 12, 22 }, + .result = ACCEPT, +}, +{ + "calls: two calls that receive map_value_ptr_or_null via arg. 
test2", + .insns = { + /* main prog */ + /* pass fp-16, fp-8 into a function */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), + /* 1st lookup from map */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_8, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_8, 1), + + /* 2nd lookup from map */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ + BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV64_IMM(BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_9, 1), + + /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* subprog 2 */ + /* if arg2 == 1 do *arg1 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + + /* if arg4 == 0 do *arg3 = 0 */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), + /* fetch map_value_ptr from the stack of this function */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + /* write into map value */ + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_8b = { 12, 22 }, + .result = REJECT, + .errstr = "R0 invalid mem access 'inv'", +}, +{ + "calls: pkt_ptr spill into caller stack", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + /* spill unchecked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + /* now the pkt range is verified, read pkt_ptr from stack */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = POINTER_VALUE, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 2", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + /* Marking 
is still kept, but not in all cases safe. */ + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + /* spill unchecked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + /* now the pkt range is verified, read pkt_ptr from stack */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "invalid access to packet", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 3", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + /* Marking is still kept and safe here. */ + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + /* spill unchecked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* now the pkt range is verified, read pkt_ptr from stack */ + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 4", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + /* Check marking propagated. 
*/ + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + /* spill unchecked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't read back pkt_ptr from stack here */ + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 5", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + /* spill checked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't read back pkt_ptr from stack here */ + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "same insn cannot be used with different", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + /* spill checked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't read back pkt_ptr from stack here */ + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R4 invalid mem access", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + 
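+	/* fp-8 holds scalar 0 here; the callee overwrites it with a
+	 * checked pkt_ptr only on its in-range path, so the loads after
+	 * the call may see a scalar instead of a pointer
+	 */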
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + /* spill checked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't read back pkt_ptr from stack here */ + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R4 invalid mem access", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 8", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), + /* spill checked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't read back pkt_ptr from stack here */ + /* write 4 bytes into packet */ + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "calls: pkt_ptr spill into caller stack 9", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_IMM(BPF_REG_5, 0), + /* spill unchecked pkt_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_5, 1), + /* don't 
read back pkt_ptr from stack here */
+	/* write 4 bytes into packet */
+	BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "invalid access to packet",
+	.result = REJECT,
+	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+	"calls: caller stack init to zero or map_value_or_null",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+	/* fetch map_value_or_null or const_zero from stack */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	/* store into map_value */
+	BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+	BPF_EXIT_INSN(),
+
+	/* subprog 1 */
+	/* if (ctx == 0) return; */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+	/* else bpf_map_lookup() and *(fp - 8) = r0 */
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 13 },
+	.result = ACCEPT,
+	.prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+	"calls: stack init to zero and pruning",
+	.insns = {
+	/* first make allocated_stack 16 bytes */
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+	/* now fork the execution such that the false branch
+	 * of the JGT insn will be verified second and it skips
+	 * zero init of the fp-8 stack slot. If stack liveness
+	 * marking is missing live_read marks from the
+	 * map_lookup call processing, then pruning will
+	 * incorrectly assume that the fp-8 stack slot was
+	 * unused in the fall-through branch and will wrongly
+	 * accept the program
+	 */
+	BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_48b = { 6 },
+	.errstr = "invalid indirect read from stack off -8+0 size 8",
+	.result = REJECT,
+	.prog_type = BPF_PROG_TYPE_XDP,
+},
+{
+	"calls: ctx read at start of subprog",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+	BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_EXIT_INSN(),
+	BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+	.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+	.result_unpriv = REJECT,
+	.result = ACCEPT,
+},
+{
+	"calls: cross frame pruning",
+	.insns = {
+	/* r8 = !!random();
+	 * call pruner()
+	 * if (r8)
+	 *     do something bad;
+	 */
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_IMM(BPF_REG_8, 0),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_8, 1),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+	BPF_LDX_MEM(BPF_B, 
BPF_REG_9, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .errstr = "!read_ok", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c new file mode 100644 index 000000000000..6d65fe3e7321 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c @@ -0,0 +1,72 @@ +{ + "bpf_exit with invalid return code. test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x0; 0xffffffff)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x0; 0x3)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. test4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. test5", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has value (0x2; 0x0)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. test6", + .insns = { + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr = "R0 is not a known value (ctx)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, +{ + "bpf_exit with invalid return code. 
test7", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4), + BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .errstr = "R0 has unknown scalar value", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, +}, diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c index 0d623af34471..92762c08f5e3 100644 --- a/tools/testing/selftests/bpf/verifier/ctx.c +++ b/tools/testing/selftests/bpf/verifier/ctx.c @@ -21,3 +21,73 @@ .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, +{ + "arithmetic ops make PTR_TO_CTX unusable", + .insns = { + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, + offsetof(struct __sk_buff, data) - + offsetof(struct __sk_buff, mark)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, + .errstr = "dereference of modified ctx ptr", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "pass unmodified ctx pointer to helper", + .insns = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_csum_update), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "pass modified ctx pointer to helper, 1", + .insns = { + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_csum_update), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "dereference of modified ctx ptr", +}, +{ + "pass modified ctx pointer to helper, 2", + .insns = { + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_socket_cookie), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result_unpriv = REJECT, + .result = REJECT, + .errstr_unpriv = "dereference of modified ctx ptr", + .errstr = "dereference of modified ctx ptr", +}, +{ + "pass modified ctx pointer to helper, 3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_csum_update), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "variable ctx access var_off=(0x0; 0x4)", +}, diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c index f32e4da991dd..881f1c7f57a1 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_skb.c +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c @@ -999,3 +999,35 @@ .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, +{ + "check wire_len is not readable by sockets", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, wire_len)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, +}, +{ + "check wire_len is readable by tc classifier", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, wire_len)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "check wire_len is not writable by tc classifier", + .insns = { + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, + offsetof(struct __sk_buff, wire_len)), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "invalid 
bpf_context access", + .errstr_unpriv = "R1 leaks addr", + .result = REJECT, +}, diff --git a/tools/testing/selftests/bpf/verifier/dead_code.c b/tools/testing/selftests/bpf/verifier/dead_code.c new file mode 100644 index 000000000000..50a8a63be4ac --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/dead_code.c @@ -0,0 +1,159 @@ +{ + "dead code: start", + .insns = { + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: mid 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: mid 2", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 1, +}, +{ + "dead code: end 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: end 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: end 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_JMP_IMM(BPF_JA, 0, 0, -5), + }, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: tail of main + func", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: tail of main + two functions", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: function in the middle and mid of another func", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 12), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 7), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 7, +}, +{ + "dead code: middle of main before call", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1), + BPF_MOV64_IMM(BPF_REG_1, 5), + 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 2, +}, +{ + "dead code: start of a function", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "function calls to other bpf functions are allowed for root only", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 2, +}, diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c index 650ae00a2ed5..e3fc22e672c2 100644 --- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c @@ -1,3 +1,17 @@ +{ + "pkt_end - pkt_start is allowed", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = TEST_DATA_LEN, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, { "direct packet access: test1", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/jit.c b/tools/testing/selftests/bpf/verifier/jit.c new file mode 100644 index 000000000000..be488b4495a3 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/jit.c @@ -0,0 +1,88 @@ +{ + "jit: lsh, rsh, arsh by 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 0xff), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jit: mov32 for ldimm64, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32), + BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jit: mov32 for ldimm64, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL), + BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jit: various mul tests", + .insns = { + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_1, 0xefefefULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + 
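/* the mov32 above truncated the expected product in r2, so the 32-bit multiply result must match its low word exactly */ + 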
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c new file mode 100644 index 000000000000..ceb39ffa0e88 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/jmp32.c @@ -0,0 +1,724 @@ +{ + "jset32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + /* reg, high bits shouldn't be tested */ + BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, -2, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + + BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1ULL << 63, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, +}, +{ + "jset32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000000), + BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), + BPF_JMP32_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1ULL << 63, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, +}, +{ + "jset32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JSET, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x10, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "jeq32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 2, + .retvals = { + { .retval = 0, + .data64 = { -2, } + }, + { .retval = 2, + .data64 = { -1, } + }, + }, +}, +{ + "jeq32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x7000000000000001), + BPF_JMP32_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 2, } + }, + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 1, } + }, + }, +}, +{ + "jeq32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JEQ, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + 
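/* the jeq32 above pins the low 32 bits of r7 to 0x10, so this jsge32 is always taken and the bad load of r8 is never reached */ + 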
BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, 0xf, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "jne32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 2, + .retvals = { + { .retval = 2, + .data64 = { 1, } + }, + { .retval = 0, + .data64 = { -1, } + }, + }, +}, +{ + "jne32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), + BPF_JMP32_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 1, } + }, + { .retval = 2, + .data64 = { 2, } + }, + { .retval = 2, + .data64 = { 1ULL << 63 | 2, } + }, + }, +}, +{ + "jne32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP32_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x10, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, +}, +{ + "jge32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, UINT_MAX - 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { 0, } + }, + }, +}, +{ + "jge32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, UINT_MAX | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { INT_MAX, } + }, + { .retval = 0, + .data64 = { (UINT_MAX - 1) | 2ULL << 32, } + }, + }, +}, +{ + "jge32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JGE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jgt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JGT, BPF_REG_7, UINT_MAX - 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { 0, } + }, + }, +}, +{ + "jgt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (UINT_MAX - 1) | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = 
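/* jmp32 compares only the low words, so the 1ULL << 32 salt in r8 is ignored */ 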
BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { UINT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX - 1, } + }, + { .retval = 0, + .data64 = { (UINT_MAX - 1) | 2ULL << 32, } + }, + }, +}, +{ + "jgt32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jle32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, INT_MAX, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { INT_MAX - 1, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { INT_MAX, } + }, + }, +}, +{ + "jle32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (INT_MAX - 1) | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX | 1ULL << 32, } + }, + { .retval = 2, + .data64 = { INT_MAX - 2, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + }, +}, +{ + "jle32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JLE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jlt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JLT, BPF_REG_7, INT_MAX, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { INT_MAX - 1, } + }, + }, +}, +{ + "jlt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, INT_MAX | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { INT_MAX | 1ULL << 32, } + }, + { .retval = 0, + .data64 = { UINT_MAX, } + }, + { .retval = 2, + .data64 = { (INT_MAX - 1) | 3ULL << 32, } + }, + }, +}, +{ + "jlt32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jsge32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + 
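/* BPF_DIRECT_PKT_R2 leaves a bounds-checked packet pointer in r2; the dword loaded below is the per-run .data64 value */ + 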
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSGE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { 0, } + }, + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 0, + .data64 = { -2, } + }, + }, +}, +{ + "jsge32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, (__u32)-1 | 2ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 2, + .data64 = { 0x7fffffff | 1ULL << 32, } + }, + { .retval = 0, + .data64 = { -2, } + }, + }, +}, +{ + "jsge32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JSGE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jsgt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSGT, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { (__u32)-2, } + }, + { .retval = 0, + .data64 = { -1, } + }, + { .retval = 2, + .data64 = { 1, } + }, + }, +}, +{ + "jsgt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 0, + .data64 = { 0x7ffffffe, } + }, + { .retval = 0, + .data64 = { 0x1ffffffffULL, } + }, + { .retval = 2, + .data64 = { 0x7fffffff, } + }, + }, +}, +{ + "jsgt32: min/max deduction", + .insns = { + BPF_RAND_SEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, (__u32)(-2) | 1ULL << 32), + BPF_JMP32_REG(BPF_JSGT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, -2, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jsle32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSLE, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { (__u32)-2, } + }, + { .retval = 2, + .data64 = { -1, } + }, + { .retval = 0, + .data64 = { 1, } + }, + }, +}, +{ + "jsle32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7ffffffe | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { 
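/* signed 32-bit compares use only the low word of r8, 0x7ffffffe */ 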
.retval = 2, + .data64 = { 0x7ffffffe, } + }, + { .retval = 2, + .data64 = { (__u32)-1, } + }, + { .retval = 0, + .data64 = { 0x7fffffff | 2ULL << 32, } + }, + }, +}, +{ + "jsle32: min/max deduction", + .insns = { + BPF_RAND_UEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, 0x7ffffff0 | 1ULL << 32), + BPF_JMP32_REG(BPF_JSLE, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_7, 0x7ffffff0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, +{ + "jslt32: BPF_K", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { (__u32)-2, } + }, + { .retval = 0, + .data64 = { -1, } + }, + { .retval = 0, + .data64 = { 1, } + }, + }, +}, +{ + "jslt32: BPF_X", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LD_IMM64(BPF_REG_8, 0x7fffffff | 1ULL << 32), + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 3, + .retvals = { + { .retval = 2, + .data64 = { 0x7ffffffe, } + }, + { .retval = 2, + .data64 = { 0xffffffff, } + }, + { .retval = 0, + .data64 = { 0x7fffffff | 2ULL << 32, } + }, + }, +}, +{ + "jslt32: min/max deduction", + .insns = { + BPF_RAND_SEXT_R7, + BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_8, (__u32)(-1) | 1ULL << 32), + BPF_JMP32_REG(BPF_JSLT, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + BPF_JMP32_IMM(BPF_JSLT, BPF_REG_7, -1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, +}, diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c new file mode 100644 index 000000000000..7e14037acfaf --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/jset.c @@ -0,0 +1,165 @@ +{ + "jset: functional", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + + /* reg, bit 63 or bit 0 set, taken */ + BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001), + BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_EXIT_INSN(), + + /* reg, bit 62, not taken */ + BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000), + BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + + /* imm, any bit set, taken */ + BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1), + BPF_EXIT_INSN(), + + /* imm, bit 31 set, taken */ + BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1), + BPF_EXIT_INSN(), + + /* all good - return r0 == 2 */ + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .runs = 7, + .retvals = { + { .retval = 2, + .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), } + }, + { .retval = 2, + .data64 = { (1ULL << 63) | (1U << 31), } + }, + { .retval = 2, + .data64 = { (1ULL << 31) | (1U << 0), } + }, + { .retval = 2, + .data64 = { (__u32)-1, } + }, + { .retval = 2, + .data64 = { ~0x4000000000000000ULL, } + }, + { .retval = 0, + .data64 = { 0, } + }, + { .retval = 0, + .data64 = { ~0ULL, } + }, + }, +}, +{ + "jset: sign-extend", + .insns = { + BPF_DIRECT_PKT_R2, + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0), + + 
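/* 64-bit JSET sign-extends its 32-bit immediate, so 0x80000000 becomes 0xffffffff80000000 and the high half of the loaded dword is tested too */ + 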
BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1), + BPF_EXIT_INSN(), + + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 2, + .data = { 1, 0, 0, 0, 0, 0, 0, 1, }, +}, +{ + "jset: known const compare", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .retval_unpriv = 1, + .result_unpriv = ACCEPT, + .retval = 1, + .result = ACCEPT, +}, +{ + "jset: known const compare bad", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .errstr_unpriv = "!read_ok", + .result_unpriv = REJECT, + .errstr = "!read_ok", + .result = REJECT, +}, +{ + "jset: unknown const compare taken", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .errstr_unpriv = "!read_ok", + .result_unpriv = REJECT, + .errstr = "!read_ok", + .result = REJECT, +}, +{ + "jset: unknown const compare not taken", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .errstr_unpriv = "!read_ok", + .result_unpriv = REJECT, + .errstr = "!read_ok", + .result = REJECT, +}, +{ + "jset: half-known const compare", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2), + BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .result_unpriv = ACCEPT, + .result = ACCEPT, +}, +{ + "jset: range", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff), + BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3), + BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .result_unpriv = ACCEPT, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/ld_abs.c b/tools/testing/selftests/bpf/verifier/ld_abs.c index 287b6fe629fa..f6599d2ec22d 100644 --- a/tools/testing/selftests/bpf/verifier/ld_abs.c +++ b/tools/testing/selftests/bpf/verifier/ld_abs.c @@ -93,3 +93,194 @@ .result = ACCEPT, .retval = 42 /* ultimate return value */, }, +{ + "ld_abs: invalid op 1", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_DW, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .errstr = "unknown opcode", +}, +{ + "ld_abs: invalid op 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_0, 256), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_IND(BPF_DW, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result 
= REJECT, + .errstr = "unknown opcode", +}, +{ + "ld_abs: nmap reduced", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 28), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 26), + BPF_MOV32_IMM(BPF_REG_0, 18), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -64), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -64), + BPF_LD_IND(BPF_W, BPF_REG_7, 14), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -60), + BPF_MOV32_IMM(BPF_REG_0, 280971478), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -60), + BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 15), + BPF_LD_ABS(BPF_H, 12), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x806, 13), + BPF_MOV32_IMM(BPF_REG_0, 22), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -56), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -56), + BPF_LD_IND(BPF_H, BPF_REG_7, 14), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -52), + BPF_MOV32_IMM(BPF_REG_0, 17366), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -48), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_10, -48), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -52), + BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), + BPF_MOV32_IMM(BPF_REG_0, 256), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .data = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 256, +}, +{ + "ld_abs: div + abs, test 1", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, 3), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2), + BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0), + BPF_LD_ABS(BPF_B, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), + BPF_LD_IND(BPF_B, BPF_REG_8, -70), + BPF_EXIT_INSN(), + }, + .data = { + 10, 20, 30, 40, 50, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 10, +}, +{ + "ld_abs: div + abs, test 2", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, 3), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_2, 2), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_2), + BPF_ALU64_REG(BPF_MOV, BPF_REG_8, BPF_REG_0), + BPF_LD_ABS(BPF_B, 128), + BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), + BPF_LD_IND(BPF_B, BPF_REG_8, -70), + BPF_EXIT_INSN(), + }, + .data = { + 10, 20, 30, 40, 50, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "ld_abs: div + abs, test 3", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), + BPF_LD_ABS(BPF_B, 3), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .data = { + 10, 20, 30, 40, 50, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "ld_abs: div + abs, test 4", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), + BPF_LD_ABS(BPF_B, 256), + BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .data = { + 10, 20, 30, 40, 50, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 0, +}, +{ + "ld_abs: vlan + abs, test 1", + .insns = { }, + .data = { + 0x34, + }, + .fill_helper = bpf_fill_ld_abs_vlan_push_pop, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 
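+ /* .insns is left empty: the program body is generated at load time by the fill_helper above */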
+ .result = ACCEPT, + .retval = 0xbef, +}, +{ + "ld_abs: vlan + abs, test 2", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_skb_vlan_push), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .data = { + 0x34, + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 42, +}, +{ + "ld_abs: jump around ld_abs", + .insns = { }, + .data = { + 10, 11, + }, + .fill_helper = bpf_fill_jump_around_ld_abs, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 10, +}, diff --git a/tools/testing/selftests/bpf/verifier/ld_dw.c b/tools/testing/selftests/bpf/verifier/ld_dw.c new file mode 100644 index 000000000000..d2c75b889598 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ld_dw.c @@ -0,0 +1,36 @@ +{ + "ld_dw: xor semi-random 64 bit imms, test 1", + .insns = { }, + .data = { }, + .fill_helper = bpf_fill_rand_ld_dw, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 4090, +}, +{ + "ld_dw: xor semi-random 64 bit imms, test 2", + .insns = { }, + .data = { }, + .fill_helper = bpf_fill_rand_ld_dw, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 2047, +}, +{ + "ld_dw: xor semi-random 64 bit imms, test 3", + .insns = { }, + .data = { }, + .fill_helper = bpf_fill_rand_ld_dw, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 511, +}, +{ + "ld_dw: xor semi-random 64 bit imms, test 4", + .insns = { }, + .data = { }, + .fill_helper = bpf_fill_rand_ld_dw, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 5, +}, diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c new file mode 100644 index 000000000000..cd26ee6b7b1d --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -0,0 +1,100 @@ +{ + "calls: two calls returning different map pointers for lookup (hash, array)", + .insns = { + /* main prog */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_CALL_REL(11), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_CALL_REL(12), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + /* subprog 1 */ + BPF_LD_MAP_FD(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* subprog 2 */ + BPF_LD_MAP_FD(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_hash_48b = { 13 }, + .fixup_map_array_48b = { 16 }, + .result = ACCEPT, + .retval = 1, +}, +{ + "calls: two calls returning different map pointers for lookup (hash, map in map)", + .insns = { + /* main prog */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), + BPF_CALL_REL(11), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_CALL_REL(12), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | 
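/* lookup on a map-in-map returns a map pointer rather than plain memory, so the ST through r0 below must be rejected */ 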
BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + /* subprog 1 */ + BPF_LD_MAP_FD(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* subprog 2 */ + BPF_LD_MAP_FD(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .fixup_map_in_map = { 16 }, + .fixup_map_array_48b = { 13 }, + .result = REJECT, + .errstr = "R0 invalid mem access 'map_ptr'", +}, +{ + "cond: two branches returning different map pointers for lookup (tail, tail)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 5 }, + .fixup_prog2 = { 2 }, + .result_unpriv = REJECT, + .errstr_unpriv = "tail_call abusing map_ptr", + .result = ACCEPT, + .retval = 42, +}, +{ + "cond: two branches returning same map pointers for lookup (tail, tail)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, mark)), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog2 = { 2, 5 }, + .result_unpriv = ACCEPT, + .result = ACCEPT, + .retval = 42, +}, diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c new file mode 100644 index 000000000000..6e1358c544fd --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/masking.c @@ -0,0 +1,322 @@ +{ + "masking, test out of bounds 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 5), + BPF_MOV32_IMM(BPF_REG_2, 5 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 3", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 4", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 
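/* neg + arsh 63 smear the sign bit into a mask: all-ones when r1 is within the bound, zero otherwise */ 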
0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 5", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 6", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 5), + BPF_MOV32_IMM(BPF_REG_2, 5 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 8", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 9", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 10", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 11", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test out of bounds 12", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, -1), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, 
BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test in bounds 1", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 4), + BPF_MOV32_IMM(BPF_REG_2, 5 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 4, +}, +{ + "masking, test in bounds 2", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test in bounds 3", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe), + BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0xfffffffe, +}, +{ + "masking, test in bounds 4", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0xabcde), + BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0xabcde, +}, +{ + "masking, test in bounds 5", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_MOV32_IMM(BPF_REG_2, 1 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, +{ + "masking, test in bounds 6", + .insns = { + BPF_MOV32_IMM(BPF_REG_1, 46), + BPF_MOV32_IMM(BPF_REG_2, 47 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 46, +}, +{ + "masking, test in bounds 7", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, -46), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1), + BPF_MOV32_IMM(BPF_REG_2, 47 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 46, +}, +{ + "masking, test in bounds 8", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, -47), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 
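/* -47 * -1 = 47, one past the bound of 46, so the mask zeroes the result */ 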
-1), + BPF_MOV32_IMM(BPF_REG_2, 47 - 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), + BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), + BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), + BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 0, +}, diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c new file mode 100644 index 000000000000..205292b8dd65 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/meta_access.c @@ -0,0 +1,235 @@ +{ + "meta access, test1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet, off=-8", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test3", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test5", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), + BPF_MOV64_IMM(BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R3 !read_ok", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, 
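/* the bounds check in this test proves meta + 8 <= data + 8, i.e. only meta <= data, which is not enough for the load */ 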
BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test7", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test8", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test9", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test10", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_IMM(BPF_REG_5, 42), + BPF_MOV64_IMM(BPF_REG_6, 24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), + BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test11", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_IMM(BPF_REG_5, 42), + BPF_MOV64_IMM(BPF_REG_6, 24), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), + BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 
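/* cap the counter read back from the stack so the variable offset added to data_meta stays bounded */ 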
100, 6), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "meta access, test12", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c new file mode 100644 index 000000000000..dc2cc823df2b --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -0,0 +1,607 @@ +{ + "reference tracking: leak potential reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking: leak potential reference on stack", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking: leak potential reference on stack 2", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking: zero potential reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking: copy and zero potential references", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */ + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking: release reference without check", + .insns = { + BPF_SK_LOOKUP, + /* reference in r0 may be NULL */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=sock_or_null expected=sock", + .result = REJECT, +}, +{ + "reference tracking: release reference", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + 
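/* sk_lookup may return NULL; the branch below skips sk_release in that case, unlike the rejected "without check" variant above */ + 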
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: release reference 2", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: release reference twice", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, +}, +{ + "reference tracking: release reference twice inside branch", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */ + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, +}, +{ + "reference tracking: alloc, check, free in one subbranch", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), + /* if (offsetof(skb, mark) > data_len) exit; */ + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, + offsetof(struct __sk_buff, mark)), + BPF_SK_LOOKUP, + BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */ + /* Leak reference in R0 */ + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "reference tracking: alloc, check, free in both subbranches", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), + /* if (offsetof(skb, mark) > data_len) exit; */ + BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, + offsetof(struct __sk_buff, mark)), + BPF_SK_LOOKUP, + BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? 
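if so skip the release; the socket is freed on both the mark and !mark paths, so nothing leaks 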
*/ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "reference tracking in call: free reference in subprog", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking in call: free reference in subprog and outside", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "type=inv expected=sock", + .result = REJECT, +}, +{ + "reference tracking in call: alloc & leak reference in subprog", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_4), + BPF_SK_LOOKUP, + /* spill unchecked sk_ptr into stack of caller */ + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking in call: alloc in subprog, release outside", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), /* return sk */ + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = POINTER_VALUE, + .result = ACCEPT, +}, +{ + "reference tracking in call: sk_ptr leak into caller stack", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 1 */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), + /* spill unchecked sk_ptr into stack of caller */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "Unreleased reference", + .result = REJECT, +}, +{ + "reference tracking in call: sk_ptr spill into caller stack", + .insns = { + BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + + 
/* subprog 1 */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), + /* spill unchecked sk_ptr into stack of caller */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + /* now the sk_ptr is verified, free the reference */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + + /* subprog 2 */ + BPF_SK_LOOKUP, + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: allow LD_ABS", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: forbid LD_ABS while holding reference", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_LD_ABS(BPF_B, 0), + BPF_LD_ABS(BPF_H, 0), + BPF_LD_ABS(BPF_W, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", + .result = REJECT, +}, +{ + "reference tracking: allow LD_IND", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .retval = 1, +}, +{ + "reference tracking: forbid LD_IND while holding reference", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_7, 1), + BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", + .result = REJECT, +}, +{ + "reference tracking: check reference or tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_SK_LOOKUP, + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), + /* bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 17 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: release reference then tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_SK_LOOKUP, + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + /* 
bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 18 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: leak possible reference over tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + /* Look up socket and store in REG_6 */ + BPF_SK_LOOKUP, + /* bpf_tail_call() */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + /* if (sk) bpf_sk_release() */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 16 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "tail_call would lead to reference leak", + .result = REJECT, +}, +{ + "reference tracking: leak checked reference over tail call", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + /* Look up socket and store in REG_6 */ + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + /* if (!sk) goto end */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + /* bpf_tail_call() */ + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .fixup_prog1 = { 17 }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "tail_call would lead to reference leak", + .result = REJECT, +}, +{ + "reference tracking: mangle and release sock_or_null", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R1 pointer arithmetic on sock_or_null prohibited", + .result = REJECT, +}, +{ + "reference tracking: mangle and release sock", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "R1 pointer arithmetic on sock prohibited", + .result = REJECT, +}, +{ + "reference tracking: access member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, +{ + "reference tracking: write to member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_LD_IMM64(BPF_REG_2, 42), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, + offsetof(struct bpf_sock, mark)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LD_IMM64(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "cannot write into socket", + .result = REJECT, +}, +{ + "reference tracking: invalid 
64-bit access of member", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "invalid bpf_sock access off=0 size=8", + .result = REJECT, +}, +{ + "reference tracking: access after release", + .insns = { + BPF_SK_LOOKUP, + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .errstr = "!read_ok", + .result = REJECT, +}, +{ + "reference tracking: direct access for lookup", + .insns = { + /* Check that the packet is at least 64B long */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), + /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ + BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_EMIT_CALL(BPF_FUNC_sk_release), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, +}, diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 33e2ea916e2c..7e50cb80873a 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -81,3 +81,76 @@ .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "search pruning: all branches should be verified (nop operation)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_A(1), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "R6 invalid mem access 'inv'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "search pruning: all branches should be verified (invalid stack access)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), + BPF_JMP_A(1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), + 
BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .errstr = "invalid read from stack off -16+0 size 8", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "allocated_stack", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), + BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8), + BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9), + BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = ACCEPT, + .insn_processed = 15, +}, diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c new file mode 100644 index 000000000000..c5de2e62cc8b --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/xadd.c @@ -0,0 +1,97 @@ +{ + "xadd/w check unaligned stack", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "misaligned stack access off", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "xadd/w check unaligned map", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 3 }, + .result = REJECT, + .errstr = "misaligned value access off", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ + "xadd/w check unaligned pkt", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 99), + BPF_JMP_IMM(BPF_JA, 0, 0, 6), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), + BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0), + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1), + BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "BPF_XADD stores into R2 pkt is not allowed", + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "xadd/w check whether src/dst got mangled, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + 
BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 3, +}, +{ + "xadd/w check whether src/dst got mangled, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8), + BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .retval = 3, +}, diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c new file mode 100644 index 000000000000..5ac390508139 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/xdp.c @@ -0,0 +1,14 @@ +{ + "XDP, using ifindex from netdev", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, ingress_ifindex)), + BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .retval = 1, +}, diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c new file mode 100644 index 000000000000..bfb97383e6b5 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c @@ -0,0 +1,900 @@ +{ + "XDP pkt read, pkt_end mangling, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R3 pointer arithmetic on pkt_end", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "XDP pkt read, pkt_end mangling, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R3 pointer arithmetic on pkt_end", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < 
pkt_end, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + 
BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 
1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, good access", + 
.insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', bad access 1", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', bad access 2", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, From cdd7b40642582f5130ea5ec20432175a70c8933e Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 25 Jan 2019 10:46:34 +0800 Subject: [PATCH 64/68] selftests: bpf: remove duplicated include Remove duplicated include. Signed-off-by: YueHaibing Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_flow_dissector.c | 2 -- tools/testing/selftests/bpf/test_maps.c | 1 - tools/testing/selftests/bpf/test_sockmap.c | 1 - 3 files changed, 4 deletions(-) diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c index 12b784afba31..01f0c634d548 100644 --- a/tools/testing/selftests/bpf/test_flow_dissector.c +++ b/tools/testing/selftests/bpf/test_flow_dissector.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -25,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index e2b9eee37187..1dfef77cff6f 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -633,7 +633,6 @@ static void test_stackmap(int task, void *data) close(fd); } -#include #include #include #include diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index e85a771f607b..3845144e2c91 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include From d76198b0bc5a67c14c5209bac82481399a3387da Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 28 Jan 2019 10:29:15 -0800 Subject: [PATCH 65/68] tools: bpftool: warn about risky prog array updates When prog array is updated with bpftool users often refer to the map via the ID. Unfortunately, that's likely to lead to confusion because prog arrays get flushed when the last user reference is gone. If there is no other reference bpftool will create one, update successfully just to close the map again and have it flushed. Warn about this case in non-JSON mode. If the problem continues causing confusion we can remove the support for referring to a map by ID for prog array update completely. For now it seems like the potential inconvenience to users who know what they're doing outweighs the benefit. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/bpf/bpftool/map.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index f15c520195b7..2160a8ef17e5 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -426,6 +426,9 @@ static int parse_elem(char **argv, struct bpf_map_info *info, p_err("not enough value arguments for map of progs"); return -1; } + if (is_prefix(*argv, "id")) + p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n" + " by some process or pinned otherwise update will be lost"); fd = prog_parse_fd(&argc, &argv); if (fd < 0) From c8aa703822bf811269975cf7251b5eaad4c38e9c Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 08:53:53 -0800 Subject: [PATCH 66/68] net/flow_dissector: move bpf case into __skb_flow_bpf_dissect This way, we can reuse it for flow dissector in BPF_PROG_TEST_RUN. No functional changes. 
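For reference, this is the calling convention the refactor creates; a minimal sketch, assuming the attached, skb, flow_dissector and target_container variables from the caller's scope in __skb_flow_dissect() (the helper itself hides the control-block save/restore):

	struct bpf_flow_keys flow_keys;
	bool ret;

	/* Run the attached BPF flow dissector program, then translate
	 * its bpf_flow_keys output into the caller's dissector keys.
	 */
	ret = __skb_flow_bpf_dissect(attached, skb, flow_dissector,
				     &flow_keys);
	__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
				 target_container);

The same pattern is what BPF_PROG_TEST_RUN reuses in the next patch, minus the __skb_flow_bpf_to_target() translation step.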
Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/skbuff.h | 5 +++ net/core/flow_dissector.c | 92 +++++++++++++++++++++++---------------- 2 files changed, 59 insertions(+), 38 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..be762fc34ff3 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1221,6 +1221,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) } #endif +struct bpf_flow_keys; +bool __skb_flow_bpf_dissect(struct bpf_prog *prog, + const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + struct bpf_flow_keys *flow_keys); bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container, diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9f2840510e63..bb1a54747d64 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -683,6 +683,46 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, } } +bool __skb_flow_bpf_dissect(struct bpf_prog *prog, + const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + struct bpf_flow_keys *flow_keys) +{ + struct bpf_skb_data_end cb_saved; + struct bpf_skb_data_end *cb; + u32 result; + + /* Note that even though the const qualifier is discarded + * throughout the execution of the BPF program, all changes(the + * control block) are reverted after the BPF program returns. + * Therefore, __skb_flow_dissect does not alter the skb. + */ + + cb = (struct bpf_skb_data_end *)skb->cb; + + /* Save Control Block */ + memcpy(&cb_saved, cb, sizeof(cb_saved)); + memset(cb, 0, sizeof(*cb)); + + /* Pass parameters to the BPF program */ + memset(flow_keys, 0, sizeof(*flow_keys)); + cb->qdisc_cb.flow_keys = flow_keys; + flow_keys->nhoff = skb_network_offset(skb); + flow_keys->thoff = flow_keys->nhoff; + + bpf_compute_data_pointers((struct sk_buff *)skb); + result = BPF_PROG_RUN(prog, skb); + + /* Restore state */ + memcpy(cb, &cb_saved, sizeof(cb_saved)); + + flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len); + flow_keys->thoff = clamp_t(u16, flow_keys->thoff, + flow_keys->nhoff, skb->len); + + return result == BPF_OK; +} + /** * __skb_flow_dissect - extract the flow_keys struct and return it * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified @@ -714,7 +754,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_vlan *key_vlan; enum flow_dissect_ret fdret; enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX; - struct bpf_prog *attached = NULL; int num_hdrs = 0; u8 ip_proto = 0; bool ret; @@ -754,53 +793,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb, FLOW_DISSECTOR_KEY_BASIC, target_container); - rcu_read_lock(); if (skb) { + struct bpf_flow_keys flow_keys; + struct bpf_prog *attached = NULL; + + rcu_read_lock(); + if (skb->dev) attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog); else if (skb->sk) attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog); else WARN_ON_ONCE(1); - } - if (attached) { - /* Note that even though the const qualifier is discarded - * throughout the execution of the BPF program, all changes(the - * control block) are reverted after the BPF program returns. - * Therefore, __skb_flow_dissect does not alter the skb. 
- */ - struct bpf_flow_keys flow_keys = {}; - struct bpf_skb_data_end cb_saved; - struct bpf_skb_data_end *cb; - u32 result; - cb = (struct bpf_skb_data_end *)skb->cb; - - /* Save Control Block */ - memcpy(&cb_saved, cb, sizeof(cb_saved)); - memset(cb, 0, sizeof(cb_saved)); - - /* Pass parameters to the BPF program */ - cb->qdisc_cb.flow_keys = &flow_keys; - flow_keys.nhoff = nhoff; - flow_keys.thoff = nhoff; - - bpf_compute_data_pointers((struct sk_buff *)skb); - result = BPF_PROG_RUN(attached, skb); - - /* Restore state */ - memcpy(cb, &cb_saved, sizeof(cb_saved)); - - flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len); - flow_keys.thoff = clamp_t(u16, flow_keys.thoff, - flow_keys.nhoff, skb->len); - - __skb_flow_bpf_to_target(&flow_keys, flow_dissector, - target_container); + if (attached) { + ret = __skb_flow_bpf_dissect(attached, skb, + flow_dissector, + &flow_keys); + __skb_flow_bpf_to_target(&flow_keys, flow_dissector, + target_container); + rcu_read_unlock(); + return ret; + } rcu_read_unlock(); - return result == BPF_OK; } - rcu_read_unlock(); if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { From b7a1848e8398b8396c990279e6a10272d818577e Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 08:53:54 -0800 Subject: [PATCH 67/68] bpf: add BPF_PROG_TEST_RUN support for flow dissector The input is packet data, the output is struct bpf_flow_key. This should make it easy to test flow dissector programs without elaborate setup. Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 3 ++ net/bpf/test_run.c | 82 +++++++++++++++++++++++++++++++++++++++++++++ net/core/filter.c | 1 + 3 files changed, 86 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3851529062ec..0394f1f9213b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -404,6 +404,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr); /* an array of programs to be executed under rcu_lock. 
* diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fa2644d276ef..2c5172b33209 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -240,3 +240,85 @@ out: kfree(data); return ret; } + +int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, + const union bpf_attr *kattr, + union bpf_attr __user *uattr) +{ + u32 size = kattr->test.data_size_in; + u32 repeat = kattr->test.repeat; + struct bpf_flow_keys flow_keys; + u64 time_start, time_spent = 0; + struct bpf_skb_data_end *cb; + u32 retval, duration; + struct sk_buff *skb; + struct sock *sk; + void *data; + int ret; + u32 i; + + if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR) + return -EINVAL; + + data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN, + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); + if (IS_ERR(data)) + return PTR_ERR(data); + + sk = kzalloc(sizeof(*sk), GFP_USER); + if (!sk) { + kfree(data); + return -ENOMEM; + } + sock_net_set(sk, current->nsproxy->net_ns); + sock_init_data(NULL, sk); + + skb = build_skb(data, 0); + if (!skb) { + kfree(data); + kfree(sk); + return -ENOMEM; + } + skb->sk = sk; + + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + __skb_put(skb, size); + skb->protocol = eth_type_trans(skb, + current->nsproxy->net_ns->loopback_dev); + skb_reset_network_header(skb); + + cb = (struct bpf_skb_data_end *)skb->cb; + cb->qdisc_cb.flow_keys = &flow_keys; + + if (!repeat) + repeat = 1; + + time_start = ktime_get_ns(); + for (i = 0; i < repeat; i++) { + preempt_disable(); + rcu_read_lock(); + retval = __skb_flow_bpf_dissect(prog, skb, + &flow_keys_dissector, + &flow_keys); + rcu_read_unlock(); + preempt_enable(); + + if (need_resched()) { + if (signal_pending(current)) + break; + time_spent += ktime_get_ns() - time_start; + cond_resched(); + time_start = ktime_get_ns(); + } + } + time_spent += ktime_get_ns() - time_start; + do_div(time_spent, repeat); + duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; + + ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys), + retval, duration); + + kfree_skb(skb); + kfree(sk); + return ret; +} diff --git a/net/core/filter.c b/net/core/filter.c index 8e587dd1da20..8ce421796ac6 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7711,6 +7711,7 @@ const struct bpf_verifier_ops flow_dissector_verifier_ops = { }; const struct bpf_prog_ops flow_dissector_prog_ops = { + .test_run = bpf_prog_test_run_flow_dissector, }; int sk_detach_filter(struct sock *sk) From bf0f0fd939451a88e8b021ad8829da2815651980 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 28 Jan 2019 08:53:55 -0800 Subject: [PATCH 68/68] selftests/bpf: add simple BPF_PROG_TEST_RUN examples for flow dissector Use existing pkt_v4 and pkt_v6 to make sure flow_keys are what we want. Also, add new bpf_flow_load routine (and flow_dissector_load.h header) that loads bpf_flow.o program and does all required setup. 
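For reference, the BPF_PROG_TEST_RUN hook added in the previous patch is reached from user space through libbpf's bpf_prog_test_run(); the selftest below builds on the following pattern. A minimal sketch, not part of the patch (prog_fd, pkt and pkt_len are placeholder names for a loaded BPF_PROG_TYPE_FLOW_DISSECTOR program and a raw packet starting at its Ethernet header):

	#include <stdio.h>
	#include <linux/bpf.h>	/* struct bpf_flow_keys */
	#include <bpf/bpf.h>	/* bpf_prog_test_run() */

	static int dissect_once(int prog_fd, void *pkt, __u32 pkt_len)
	{
		struct bpf_flow_keys flow_keys = {};
		__u32 size = 0, retval = 0, duration = 0;
		int err;

		err = bpf_prog_test_run(prog_fd, 1 /* repeat */, pkt, pkt_len,
					&flow_keys, &size, &retval, &duration);
		/* retval carries __skb_flow_bpf_dissect()'s result: 1 on BPF_OK */
		if (err || retval != 1 || size != sizeof(flow_keys))
			return -1;

		printf("nhoff=%u thoff=%u\n", flow_keys.nhoff, flow_keys.thoff);
		return 0;
	}
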
Signed-off-by: Stanislav Fomichev Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/Makefile | 3 + .../selftests/bpf/flow_dissector_load.c | 43 ++-------- .../selftests/bpf/flow_dissector_load.h | 55 +++++++++++++ tools/testing/selftests/bpf/test_progs.c | 78 ++++++++++++++++++- 4 files changed, 139 insertions(+), 40 deletions(-) create mode 100644 tools/testing/selftests/bpf/flow_dissector_load.h diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 89b0d1799ff3..c566090e5657 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -148,6 +148,9 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline $(OUTPUT)/test_queue_map.o: test_queue_stack_map.h $(OUTPUT)/test_stack_map.o: test_queue_stack_map.h +$(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h +$(OUTPUT)/test_progs.o: flow_dissector_load.h + BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris) BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF) BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm') diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c index ae8180b11d5f..77cafa66d048 100644 --- a/tools/testing/selftests/bpf/flow_dissector_load.c +++ b/tools/testing/selftests/bpf/flow_dissector_load.c @@ -12,6 +12,7 @@ #include #include "bpf_rlimit.h" +#include "flow_dissector_load.h" const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector"; const char *cfg_map_name = "jmp_table"; @@ -21,46 +22,13 @@ char *cfg_path_name; static void load_and_attach_program(void) { - struct bpf_program *prog, *main_prog; - struct bpf_map *prog_array; - int i, fd, prog_fd, ret; + int prog_fd, ret; struct bpf_object *obj; - int prog_array_fd; - ret = bpf_prog_load(cfg_path_name, BPF_PROG_TYPE_FLOW_DISSECTOR, &obj, - &prog_fd); + ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name, + cfg_map_name, &prog_fd); if (ret) - error(1, 0, "bpf_prog_load %s", cfg_path_name); - - main_prog = bpf_object__find_program_by_title(obj, cfg_section_name); - if (!main_prog) - error(1, 0, "bpf_object__find_program_by_title %s", - cfg_section_name); - - prog_fd = bpf_program__fd(main_prog); - if (prog_fd < 0) - error(1, 0, "bpf_program__fd"); - - prog_array = bpf_object__find_map_by_name(obj, cfg_map_name); - if (!prog_array) - error(1, 0, "bpf_object__find_map_by_name %s", cfg_map_name); - - prog_array_fd = bpf_map__fd(prog_array); - if (prog_array_fd < 0) - error(1, 0, "bpf_map__fd %s", cfg_map_name); - - i = 0; - bpf_object__for_each_program(prog, obj) { - fd = bpf_program__fd(prog); - if (fd < 0) - error(1, 0, "bpf_program__fd"); - - if (fd != prog_fd) { - printf("%d: %s\n", i, bpf_program__title(prog, false)); - bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY); - ++i; - } - } + error(1, 0, "bpf_flow_load %s", cfg_path_name); ret = bpf_prog_attach(prog_fd, 0 /* Ignore */, BPF_FLOW_DISSECTOR, 0); if (ret) @@ -69,7 +37,6 @@ static void load_and_attach_program(void) ret = bpf_object__pin(obj, cfg_pin_path); if (ret) error(1, 0, "bpf_object__pin %s", cfg_pin_path); - } static void detach_program(void) diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h new file mode 100644 index 000000000000..41dd6959feb0 --- /dev/null +++ b/tools/testing/selftests/bpf/flow_dissector_load.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 
+#ifndef FLOW_DISSECTOR_LOAD +#define FLOW_DISSECTOR_LOAD + +#include +#include + +static inline int bpf_flow_load(struct bpf_object **obj, + const char *path, + const char *section_name, + const char *map_name, + int *prog_fd) +{ + struct bpf_program *prog, *main_prog; + struct bpf_map *prog_array; + int prog_array_fd; + int ret, fd, i; + + ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj, + prog_fd); + if (ret) + return ret; + + main_prog = bpf_object__find_program_by_title(*obj, section_name); + if (!main_prog) + return ret; + + *prog_fd = bpf_program__fd(main_prog); + if (*prog_fd < 0) + return ret; + + prog_array = bpf_object__find_map_by_name(*obj, map_name); + if (!prog_array) + return ret; + + prog_array_fd = bpf_map__fd(prog_array); + if (prog_array_fd < 0) + return ret; + + i = 0; + bpf_object__for_each_program(prog, *obj) { + fd = bpf_program__fd(prog); + if (fd < 0) + return fd; + + if (fd != *prog_fd) { + bpf_map_update_elem(prog_array_fd, &i, &fd, BPF_ANY); + ++i; + } + } + + return 0; +} + +#endif /* FLOW_DISSECTOR_LOAD */ diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 126fc624290d..5f46680d4ad4 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -39,6 +39,7 @@ typedef __u16 __sum16; #include "bpf_endian.h" #include "bpf_rlimit.h" #include "trace_helpers.h" +#include "flow_dissector_load.h" static int error_cnt, pass_cnt; static bool jit_enabled; @@ -53,9 +54,10 @@ static struct { } __packed pkt_v4 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IP), .iph.ihl = 5, - .iph.protocol = 6, + .iph.protocol = IPPROTO_TCP, .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), .tcp.urg_ptr = 123, + .tcp.doff = 5, }; /* ipv6 test vector */ @@ -65,9 +67,10 @@ static struct { struct tcphdr tcp; } __packed pkt_v6 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), - .iph.nexthdr = 6, + .iph.nexthdr = IPPROTO_TCP, .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), .tcp.urg_ptr = 123, + .tcp.doff = 5, }; #define _CHECK(condition, tag, duration, format...) 
({ \ @@ -1882,6 +1885,76 @@ out: bpf_object__close(obj); } +#define CHECK_FLOW_KEYS(desc, got, expected) \ + CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ + desc, \ + "nhoff=%u/%u " \ + "thoff=%u/%u " \ + "addr_proto=0x%x/0x%x " \ + "is_frag=%u/%u " \ + "is_first_frag=%u/%u " \ + "is_encap=%u/%u " \ + "n_proto=0x%x/0x%x " \ + "sport=%u/%u " \ + "dport=%u/%u\n", \ + got.nhoff, expected.nhoff, \ + got.thoff, expected.thoff, \ + got.addr_proto, expected.addr_proto, \ + got.is_frag, expected.is_frag, \ + got.is_first_frag, expected.is_first_frag, \ + got.is_encap, expected.is_encap, \ + got.n_proto, expected.n_proto, \ + got.sport, expected.sport, \ + got.dport, expected.dport) + +static struct bpf_flow_keys pkt_v4_flow_keys = { + .nhoff = 0, + .thoff = sizeof(struct iphdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = bpf_htons(ETH_P_IP), +}; + +static struct bpf_flow_keys pkt_v6_flow_keys = { + .nhoff = 0, + .thoff = sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = bpf_htons(ETH_P_IPV6), +}; + +static void test_flow_dissector(void) +{ + struct bpf_flow_keys flow_keys; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + __u32 size; + + err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", + "jmp_table", &prog_fd); + if (err) { + error_cnt++; + return; + } + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), + &flow_keys, &size, &retval, &duration); + CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv4", + "err %d errno %d retval %d duration %d size %u/%lu\n", + err, errno, retval, duration, size, sizeof(flow_keys)); + CHECK_FLOW_KEYS("ipv4_flow_keys", flow_keys, pkt_v4_flow_keys); + + err = bpf_prog_test_run(prog_fd, 10, &pkt_v6, sizeof(pkt_v6), + &flow_keys, &size, &retval, &duration); + CHECK(size != sizeof(flow_keys) || err || retval != 1, "ipv6", + "err %d errno %d retval %d duration %d size %u/%lu\n", + err, errno, retval, duration, size, sizeof(flow_keys)); + CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); + + bpf_object__close(obj); +} + int main(void) { srand(time(NULL)); @@ -1909,6 +1982,7 @@ int main(void) test_reference_tracking(); test_queue_stack_map(QUEUE); test_queue_stack_map(STACK); + test_flow_dissector(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
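
A note on the expected key vectors above: __skb_flow_bpf_dissect() seeds nhoff with skb_network_offset(), which is 0 in the test-run path because eth_type_trans() has already pulled the Ethernet header, so a successful dissection of these packets leaves thoff at the start of the TCP header, i.e. just past the fixed L3 header. That is why pkt_v4_flow_keys and pkt_v6_flow_keys expect sizeof(struct iphdr) and sizeof(struct ipv6hdr). A compile-time restatement of the arithmetic (a sketch for illustration, not part of the patch):

	#include <linux/ip.h>
	#include <linux/ipv6.h>

	/* pkt_v4 has ihl = 5, i.e. no IPv4 options: thoff = 5 * 4 = 20 */
	_Static_assert(sizeof(struct iphdr) == 20, "fixed IPv4 header");
	/* the IPv6 base header is always 40 bytes: thoff = 40 */
	_Static_assert(sizeof(struct ipv6hdr) == 40, "fixed IPv6 header");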