libbpf: Implement type-based CO-RE relocations support

Implement support for TYPE_EXISTS/TYPE_SIZE/TYPE_ID_LOCAL/TYPE_ID_TARGET
relocations. These are examples of type-based relocations, as opposed to the
field-based relocations already supported. The difference is that they
calculate relocation values based on the type itself, not on a field within
a struct/union.

Type-based relos have slightly different semantics when matching local types
to kernel target types; see the comments in bpf_core_types_are_compat() for
details. Their behavior on failure to find the target type in kernel BTF also
differs. Instead of "poisoning" the relocatable instruction and subsequently
failing the load in the kernel, they return 0 (which is rarely a valid result,
so user BPF code can use that to detect success or failure of the relocation
and deal with it without extra "guarding" relocations). It is also always
possible to check for the existence of a type in the target kernel with a
TYPE_EXISTS relocation, similarly to the field-based FIELD_EXISTS.

The TYPE_ID_LOCAL relocation is a bit special in that it always succeeds
(barring any libbpf/Clang bugs) and is resolved to a BTF ID using the **local**
BTF info of the BPF program itself. Tests in subsequent patches demonstrate
the usage and semantics of the new relocations.
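
As an illustration of these semantics, a minimal BPF-side sketch (the trimmed
local struct definitions, section name and messages are hypothetical, not part
of this patch) using the bpf_core_read.h helpers added below could look like:

    #include <linux/types.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    /* minimal local definitions; only name and kind need to match kernel BTF */
    struct task_struct { int pid; };
    struct type_that_might_not_exist { int x; };

    SEC("raw_tracepoint/sys_enter")
    int type_relo_example(void *ctx)
    {
    	/* resolved from the program's own BTF, always succeeds */
    	__u32 local_id = bpf_core_type_id_local(struct task_struct);
    	/* resolved against kernel BTF; 0 if no matching type is found */
    	__u32 kern_id = bpf_core_type_id_kernel(struct task_struct);
    	/* also 0 when the type is missing, so no extra "guarding" relo is needed */
    	__u32 kern_sz = bpf_core_type_size(struct task_struct);

    	if (!bpf_core_type_exists(struct type_that_might_not_exist))
    		bpf_printk("type is absent in this kernel");

    	bpf_printk("id=%u/%u size=%u", local_id, kern_id, kern_sz);
    	return 0;
    }

    char _license[] SEC("license") = "GPL";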

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200819194519.3375898-2-andriin@fb.com
Andrii Nakryiko 2020-08-19 12:45:15 -07:00, committed by Alexei Starovoitov
Parent defcffeb51
Commit 3fc32f40c4
3 changed files with 263 additions and 24 deletions

@@ -19,6 +19,18 @@ enum bpf_field_info_kind {
BPF_FIELD_RSHIFT_U64 = 5,
};
/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */
BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */
};
/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
BPF_TYPE_SIZE = 1, /* type size in target kernel */
};
#define __CORE_RELO(src, field, info) \
__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
@@ -94,12 +106,50 @@ enum bpf_field_info_kind {
__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
/*
-* Convenience macro to get byte size of a field. Works for integers,
+* Convenience macro to get the byte size of a field. Works for integers,
* struct/unions, pointers, arrays, and enums.
*/
#define bpf_core_field_size(field) \
__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
/*
* Convenience macro to get the BTF type ID of a specified type, using local
* BTF information. Returns a 32-bit unsigned integer with the type ID from the
* program's own BTF. Always succeeds.
*/
#define bpf_core_type_id_local(type) \
__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
/*
* Convenience macro to get BTF type ID of a target kernel's type that matches
* specified local type.
* Returns:
* - valid 32-bit unsigned type ID in kernel BTF;
* - 0, if no matching type was found in a target kernel BTF.
*/
#define bpf_core_type_id_kernel(type) \
__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
/*
* Convenience macro to check that provided named type
* (struct/union/enum/typedef) exists in a target kernel.
* Returns:
* 1, if such type is present in target kernel's BTF;
* 0, if no matching type is found.
*/
#define bpf_core_type_exists(type) \
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
/*
* Convenience macro to get the byte size of a provided named type
* (struct/union/enum/typedef) in a target kernel.
* Returns:
* >= 0 size (in bytes), if type is present in target kernel's BTF;
* 0, if no matching type is found.
*/
#define bpf_core_type_size(type) \
__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
/*
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
* offset relocation for source address using __builtin_preserve_access_index()

@@ -4111,6 +4111,10 @@ static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
case BPF_FIELD_SIGNED: return "signed";
case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
case BPF_TYPE_ID_LOCAL: return "local_type_id";
case BPF_TYPE_ID_TARGET: return "target_type_id";
case BPF_TYPE_EXISTS: return "type_exists";
case BPF_TYPE_SIZE: return "type_size";
default: return "unknown";
}
}
@@ -4130,6 +4134,19 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
}
}
static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
{
switch (kind) {
case BPF_TYPE_ID_LOCAL:
case BPF_TYPE_ID_TARGET:
case BPF_TYPE_EXISTS:
case BPF_TYPE_SIZE:
return true;
default:
return false;
}
}
/*
* Turn bpf_core_relo into a low- and high-level spec representation,
* validating correctness along the way, as well as calculating resulting
@@ -4160,6 +4177,9 @@ static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
* - field 'a' access (corresponds to '2' in low-level spec);
* - array element #3 access (corresponds to '3' in low-level spec).
*
* Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
* TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
* spec and raw_spec are kept empty.
*/
static int bpf_core_parse_spec(const struct btf *btf,
__u32 type_id,
@@ -4182,6 +4202,13 @@ static int bpf_core_parse_spec(const struct btf *btf,
spec->root_type_id = type_id;
spec->relo_kind = relo_kind;
/* type-based relocations don't have a field access string */
if (core_relo_is_type_based(relo_kind)) {
if (strcmp(spec_str, "0"))
return -EINVAL;
return 0;
}
/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
while (*spec_str) {
if (*spec_str == ':')
@@ -4317,7 +4344,7 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
const struct btf *targ_btf)
{
size_t local_essent_len, targ_essent_len;
-const char *local_name, *targ_name, *targ_kind;
+const char *local_name, *targ_name;
const struct btf_type *t, *local_t;
struct ids_vec *cand_ids;
__u32 *new_ids;
@@ -4339,14 +4366,12 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
n = btf__get_nr_types(targ_btf);
for (i = 1; i <= n; i++) {
t = btf__type_by_id(targ_btf, i);
+if (btf_kind(t) != btf_kind(local_t))
+continue;
targ_name = btf__name_by_offset(targ_btf, t->name_off);
if (str_is_empty(targ_name))
continue;
-targ_kind = btf_kind_str(t);
-t = skip_mods_and_typedefs(targ_btf, i, NULL);
-if (!btf_is_composite(t) && !btf_is_array(t))
-continue;
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
@@ -4355,7 +4380,7 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
if (strncmp(local_name, targ_name, local_essent_len) == 0) {
pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
local_type_id, btf_kind_str(local_t),
-local_name, i, targ_kind, targ_name);
+local_name, i, btf_kind_str(t), targ_name);
new_ids = libbpf_reallocarray(cand_ids->data,
cand_ids->len + 1,
sizeof(*cand_ids->data));
@@ -4373,8 +4398,9 @@ err_out:
return ERR_PTR(err);
}
-/* Check two types for compatibility, skipping const/volatile/restrict and
-* typedefs, to ensure we are relocating compatible entities:
+/* Check two types for compatibility for the purpose of field access
+* relocation. const/volatile/restrict and typedefs are skipped to ensure we
+* are relocating semantically compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
* - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
@@ -4529,6 +4555,100 @@ static int bpf_core_match_member(const struct btf *local_btf,
return 0;
}
/* Check local and target types for compatibility. This check is used for
* type-based CO-RE relocations and follows slightly different rules than
* field-based relocations. This function assumes that root types were already
* checked for name match. Beyond that initial root-level name check, names
* are completely ignored. Compatibility rules are as follows:
* - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
* kind should match for local and target types (i.e., STRUCT is not
* compatible with UNION);
* - for ENUMs, the size is ignored;
* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - CONST/VOLATILE/RESTRICT modifiers are ignored;
* - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
* - FUNC_PROTOs are compatible if they have compatible signature: same
* number of input args and compatible return and argument types.
* These rules are not set in stone and probably will be adjusted as we get
* more experience with using BPF CO-RE relocations.
*/
static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
const struct btf *targ_btf, __u32 targ_id)
{
const struct btf_type *local_type, *targ_type;
int depth = 32; /* max recursion depth */
/* caller made sure that names match (ignoring flavor suffix) */
local_type = btf__type_by_id(local_btf, local_id);
targ_type = btf__type_by_id(targ_btf, targ_id);
if (btf_kind(local_type) != btf_kind(targ_type))
return 0;
recur:
depth--;
if (depth < 0)
return -EINVAL;
local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
if (!local_type || !targ_type)
return -EINVAL;
if (btf_kind(local_type) != btf_kind(targ_type))
return 0;
switch (btf_kind(local_type)) {
case BTF_KIND_UNKN:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_ENUM:
case BTF_KIND_FWD:
return 1;
case BTF_KIND_INT:
/* just reject deprecated bitfield-like integers; all other
* integers are by default compatible between each other
*/
return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
case BTF_KIND_PTR:
local_id = local_type->type;
targ_id = targ_type->type;
goto recur;
case BTF_KIND_ARRAY:
local_id = btf_array(local_type)->type;
targ_id = btf_array(targ_type)->type;
goto recur;
case BTF_KIND_FUNC_PROTO: {
struct btf_param *local_p = btf_params(local_type);
struct btf_param *targ_p = btf_params(targ_type);
__u16 local_vlen = btf_vlen(local_type);
__u16 targ_vlen = btf_vlen(targ_type);
int i, err;
if (local_vlen != targ_vlen)
return 0;
for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
if (err <= 0)
return err;
}
/* tail recurse for return type check */
skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
goto recur;
}
default:
pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
btf_kind_str(local_type), local_id, targ_id);
return 0;
}
}
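
As an illustration of the matching rules above (a sketch, not part of the
patch), local definitions like the following in a BPF object would be matched
against kernel BTF purely by name and kind:

    /* STRUCT task_struct: matched by name and kind only; members are not compared */
    struct task_struct { int pid; };

    /* ENUM pid_type: enumerators and size are ignored, name and ENUM kind must match */
    enum pid_type { PIDTYPE_PID };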
/*
* Try to match local spec to a target type and, if successful, produce full
* target spec (high-level, low-level + bit offset).
@@ -4547,6 +4667,12 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
targ_spec->root_type_id = targ_id;
targ_spec->relo_kind = local_spec->relo_kind;
if (core_relo_is_type_based(local_spec->relo_kind)) {
return bpf_core_types_are_compat(local_spec->btf,
local_spec->root_type_id,
targ_btf, targ_id);
}
local_acc = &local_spec->spec[0];
targ_acc = &targ_spec->spec[0];
@@ -4720,6 +4846,40 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
return 0;
}
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
__u32 *val)
{
__s64 sz;
/* type-based relos return zero when target type is not found */
if (!spec) {
*val = 0;
return 0;
}
switch (relo->kind) {
case BPF_TYPE_ID_TARGET:
*val = spec->root_type_id;
break;
case BPF_TYPE_EXISTS:
*val = 1;
break;
case BPF_TYPE_SIZE:
sz = btf__resolve_size(spec->btf, spec->root_type_id);
if (sz < 0)
return -EINVAL;
*val = sz;
break;
case BPF_TYPE_ID_LOCAL:
/* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
default:
return -EOPNOTSUPP;
}
return 0;
}
struct bpf_core_relo_res
{
/* expected value in the instruction, unless validate == false */
@@ -4755,6 +4915,9 @@ static int bpf_core_calc_relo(const struct bpf_program *prog,
if (core_relo_is_field_based(relo->kind)) {
err = bpf_core_calc_field_relo(prog, relo, local_spec, &res->orig_val, &res->validate);
err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec, &res->new_val, NULL);
} else if (core_relo_is_type_based(relo->kind)) {
err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
}
if (err == -EUCLEAN) {
@@ -4894,6 +5057,9 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
if (core_relo_is_type_based(spec->relo_kind))
return;
if (core_relo_is_field_based(spec->relo_kind)) {
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
@@ -4911,6 +5077,7 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
spec->bit_offset / 8, spec->bit_offset % 8);
else
libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
return;
}
}
@@ -4979,12 +5146,12 @@ static void *u32_as_hash_key(__u32 x)
* between multiple relocations for the same type ID and is updated as some
* of the candidates are pruned due to structural incompatibility.
*/
-static int bpf_core_reloc_field(struct bpf_program *prog,
+static int bpf_core_apply_relo(struct bpf_program *prog,
const struct bpf_core_relo *relo,
int relo_idx,
const struct btf *local_btf,
const struct btf *targ_btf,
struct hashmap *cand_cache)
{
const char *prog_name = bpf_program__title(prog, false);
struct bpf_core_spec local_spec, cand_spec, targ_spec;
@@ -5003,7 +5170,7 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
return -EINVAL;
local_name = btf__name_by_offset(local_btf, local_type->name_off);
-if (str_is_empty(local_name))
+if (!local_name)
return -EINVAL;
spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
@@ -5014,7 +5181,8 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
prog_name, relo_idx, local_id, btf_kind_str(local_type),
-local_name, spec_str, err);
+str_is_empty(local_name) ? "<anon>" : local_name,
+spec_str, err);
return -EINVAL;
}
@@ -5023,12 +5191,28 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
if (relo->kind == BPF_TYPE_ID_LOCAL) {
targ_res.validate = true;
targ_res.poison = false;
targ_res.orig_val = local_spec.root_type_id;
targ_res.new_val = local_spec.root_type_id;
goto patch_insn;
}
/* libbpf doesn't support candidate search for anonymous types */
if (str_is_empty(spec_str)) {
pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
return -EOPNOTSUPP;
}
if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
if (IS_ERR(cand_ids)) {
pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
-prog_name, relo_idx, local_id, btf_kind_str(local_type), local_name,
-PTR_ERR(cand_ids));
+prog_name, relo_idx, local_id, btf_kind_str(local_type),
+local_name, PTR_ERR(cand_ids));
return PTR_ERR(cand_ids);
}
err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
@@ -5084,7 +5268,7 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
return -EINVAL;
}
-cand_ids->data[j++] = cand_spec.spec[0].type_id;
+cand_ids->data[j++] = cand_spec.root_type_id;
}
/*
@@ -5103,7 +5287,7 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
* as well as expected case, depending whether instruction w/
* relocation is guarded in some way that makes it unreachable (dead
* code) if relocation can't be resolved. This is handled in
-* bpf_core_reloc_insn() uniformly by replacing that instruction with
+* bpf_core_patch_insn() uniformly by replacing that instruction with
* BPF helper call insn (using invalid helper ID). If that instruction
* is indeed unreachable, then it will be ignored and eliminated by
* verifier. If it was an error, then verifier will complain and point
@@ -5119,6 +5303,7 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
return err;
}
patch_insn:
/* bpf_core_patch_insn() should know how to handle missing targ_spec */
err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
if (err) {
@@ -5186,8 +5371,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
-err = bpf_core_reloc_field(prog, rec, i, obj->btf,
+err = bpf_core_apply_relo(prog, rec, i, obj->btf,
targ_btf, cand_cache);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
sec_name, i, err);

@@ -238,6 +238,10 @@ enum bpf_core_relo_kind {
BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */
BPF_TYPE_EXISTS = 8, /* type existence in target kernel */
BPF_TYPE_SIZE = 9, /* type size in bytes */
};
/* The minimum bpf_core_relo checked by the loader
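
For reference, a hypothetical example (not part of the patch) of how one of the
new kinds would appear in a .BTF.ext CO-RE relocation record; the concrete
offsets and IDs below are made up and depend entirely on the compiled object:

    struct bpf_core_relo rec = {
    	.insn_off = 16,          /* byte offset of the instruction to patch */
    	.type_id = 42,           /* root type ID in the program's local BTF */
    	.access_str_off = 7,     /* offset of the string "0": type-based relos carry no field accessors */
    	.kind = BPF_TYPE_EXISTS, /* one of the new type-based kinds above */
    };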