Diffstat (limited to 'tools/lib/bpf')
-rw-r--r--  tools/lib/bpf/Makefile          |   26
-rw-r--r--  tools/lib/bpf/bpf.c             |   24
-rw-r--r--  tools/lib/bpf/bpf.h             |    1
-rw-r--r--  tools/lib/bpf/btf.c             |  250
-rw-r--r--  tools/lib/bpf/btf.h             |  182
-rw-r--r--  tools/lib/bpf/btf_dump.c        |  138
-rw-r--r--  tools/lib/bpf/libbpf.c          | 1009
-rw-r--r--  tools/lib/bpf/libbpf.h          |    3
-rw-r--r--  tools/lib/bpf/libbpf.map        |    6
-rw-r--r--  tools/lib/bpf/libbpf_internal.h |  105
-rw-r--r--  tools/lib/bpf/libbpf_probes.c   |    1
-rw-r--r--  tools/lib/bpf/xsk.c             |   86
-rw-r--r--  tools/lib/bpf/xsk.h             |   33
13 files changed, 1515 insertions(+), 349 deletions(-)
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 9312066a1ae3..c6f94cffe06e 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Most of this file is copied from tools/lib/traceevent/Makefile
-BPF_VERSION = 0
-BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 4
+LIBBPF_VERSION := $(shell \
+ grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
+ sort -rV | head -n1 | cut -d'_' -f2)
+LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
MAKEFLAGS += --no-print-directory
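
The library version is now derived from the newest version node in libbpf.map rather than from hard-coded BPF_* numbers. A sketch of what the pipeline computes, assuming (for illustration only) the map's newest node were LIBBPF_0.0.5:

# libbpf.map (assumed excerpt):
#   LIBBPF_0.0.4 { ... };
#   LIBBPF_0.0.5 { ... } LIBBPF_0.0.4;
#
# grep -oE '^LIBBPF_([0-9.]+)'   -> LIBBPF_0.0.4, LIBBPF_0.0.5
# sort -rV | head -n1            -> LIBBPF_0.0.5  (highest version)
# cut -d'_' -f2                  -> 0.0.5         (LIBBPF_VERSION)
# $(firstword $(subst ., ,...))  -> 0             (LIBBPF_MAJOR_VERSION)
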
@@ -79,15 +80,9 @@ export prefix libdir src obj
libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-VERSION = $(BPF_VERSION)
-PATCHLEVEL = $(BPF_PATCHLEVEL)
-EXTRAVERSION = $(BPF_EXTRAVERSION)
-
OBJ = $@
N =
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
-
LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
LIB_FILE = libbpf.a libbpf.so*
PC_FILE = libbpf.pc
@@ -113,6 +108,7 @@ override CFLAGS += -Werror -Wall
override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
+override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ifeq ($(VERBOSE),1)
Q =
@@ -138,7 +134,9 @@ LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE))
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
- awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
+ sort -u | wc -l)
VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
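
The extra cut/sed stage normalizes symbol names before counting, so the ABI check compares like with like. A sketch under assumed readelf output (hypothetical symbol names):

#   ... GLOBAL DEFAULT   13 xsk_umem__create@LIBBPF_0.0.4
#   ... GLOBAL DEFAULT   13 xsk_umem__create_v0_0_2
#
# cut -d "@" -f1 strips the @LIBBPF_x.y.z symver tag, and
# sed 's/_v[0-9]_[0-9]_[0-9].*//' strips the _vX_Y_Z suffixes of
# versioned implementation aliases, so both lines count once as the
# plain symbol xsk_umem__create after sort -u.
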
@@ -178,10 +176,10 @@ $(BPF_IN): force elfdep bpfdep
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
- $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
-Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
- @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
+ @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -205,6 +203,7 @@ check_abi: $(OUTPUT)libbpf.so
"Please make sure all LIBBPF_API symbols are" \
"versioned in $(VERSION_SCRIPT)." >&2; \
readelf -s --wide $(OUTPUT)libbpf-in.o | \
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \
sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
readelf -s --wide $(OUTPUT)libbpf.so | \
@@ -257,7 +256,8 @@ config-clean:
clean:
$(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
- *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd *.pc LIBBPF-CFLAGS
+ *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
+ *.pc LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c7d7993c44bb..cbb933532981 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -568,7 +568,7 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
return ret;
}
-int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
+static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
union bpf_attr attr;
int err;
@@ -576,26 +576,26 @@ int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
memset(&attr, 0, sizeof(attr));
attr.start_id = start_id;
- err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr));
+ err = sys_bpf(cmd, &attr, sizeof(attr));
if (!err)
*next_id = attr.next_id;
return err;
}
-int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
+int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
- union bpf_attr attr;
- int err;
-
- memset(&attr, 0, sizeof(attr));
- attr.start_id = start_id;
+ return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
+}
- err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr));
- if (!err)
- *next_id = attr.next_id;
+int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
+}
- return err;
+int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
+{
+ return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}
int bpf_prog_get_fd_by_id(__u32 id)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index ff42ca043dc8..0db01334740f 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -156,6 +156,7 @@ LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
__u32 *retval, __u32 *duration);
LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
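
bpf_btf_get_next_id() completes the object-iteration API alongside the prog and map variants. A minimal usage sketch (not part of the patch; assumes <unistd.h> for close()), walking all BTF objects loaded in the kernel:

	__u32 id = 0;
	int fd;

	while (!bpf_btf_get_next_id(id, &id)) {
		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0)
			continue; /* object may have been freed meanwhile */
		/* ... inspect the BTF object through fd ... */
		close(fd);
	}
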
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d821107f55f9..1aa189a9112a 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -20,13 +20,6 @@
#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff
-#define IS_MODIFIER(k) (((k) == BTF_KIND_TYPEDEF) || \
- ((k) == BTF_KIND_VOLATILE) || \
- ((k) == BTF_KIND_CONST) || \
- ((k) == BTF_KIND_RESTRICT))
-
-#define IS_VAR(k) ((k) == BTF_KIND_VAR)
-
static struct btf_type btf_void;
struct btf {
@@ -43,47 +36,6 @@ struct btf {
int fd;
};
-struct btf_ext_info {
- /*
- * info points to the individual info section (e.g. func_info and
- * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
- */
- void *info;
- __u32 rec_size;
- __u32 len;
-};
-
-struct btf_ext {
- union {
- struct btf_ext_header *hdr;
- void *data;
- };
- struct btf_ext_info func_info;
- struct btf_ext_info line_info;
- __u32 data_size;
-};
-
-struct btf_ext_info_sec {
- __u32 sec_name_off;
- __u32 num_info;
- /* Followed by num_info * record_size number of bytes */
- __u8 data[0];
-};
-
-/* The minimum bpf_func_info checked by the loader */
-struct bpf_func_info_min {
- __u32 insn_off;
- __u32 type_id;
-};
-
-/* The minimum bpf_line_info checked by the loader */
-struct bpf_line_info_min {
- __u32 insn_off;
- __u32 file_name_off;
- __u32 line_off;
- __u32 line_col;
-};
-
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
@@ -193,9 +145,9 @@ static int btf_parse_str_sec(struct btf *btf)
static int btf_type_size(struct btf_type *t)
{
int base_size = sizeof(struct btf_type);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ __u16 vlen = btf_vlen(t);
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_FWD:
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
@@ -220,7 +172,7 @@ static int btf_type_size(struct btf_type *t)
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
default:
- pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
+ pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
}
}
@@ -264,7 +216,7 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
static bool btf_type_is_void(const struct btf_type *t)
{
- return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+ return t == &btf_void || btf_is_fwd(t);
}
static bool btf_type_is_void_or_null(const struct btf_type *t)
@@ -285,7 +237,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
i++) {
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
@@ -304,7 +256,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
type_id = t->type;
break;
case BTF_KIND_ARRAY:
- array = (const struct btf_array *)(t + 1);
+ array = btf_array(t);
if (nelems && array->nelems > UINT32_MAX / nelems)
return -E2BIG;
nelems *= array->nelems;
@@ -335,8 +287,7 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
t = btf__type_by_id(btf, type_id);
while (depth < MAX_RESOLVE_DEPTH &&
!btf_type_is_void_or_null(t) &&
- (IS_MODIFIER(BTF_INFO_KIND(t->info)) ||
- IS_VAR(BTF_INFO_KIND(t->info)))) {
+ (btf_is_mod(t) || btf_is_typedef(t) || btf_is_var(t))) {
type_id = t->type;
t = btf__type_by_id(btf, type_id);
depth++;
@@ -555,11 +506,11 @@ static int compare_vsi_off(const void *_a, const void *_b)
static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
struct btf_type *t)
{
- __u32 size = 0, off = 0, i, vars = BTF_INFO_VLEN(t->info);
+ __u32 size = 0, off = 0, i, vars = btf_vlen(t);
const char *name = btf__name_by_offset(btf, t->name_off);
const struct btf_type *t_var;
struct btf_var_secinfo *vsi;
- struct btf_var *var;
+ const struct btf_var *var;
int ret;
if (!name) {
@@ -575,12 +526,11 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
t->size = size;
- for (i = 0, vsi = (struct btf_var_secinfo *)(t + 1);
- i < vars; i++, vsi++) {
+ for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
- var = (struct btf_var *)(t_var + 1);
+ var = btf_var(t_var);
- if (BTF_INFO_KIND(t_var->info) != BTF_KIND_VAR) {
+ if (!btf_is_var(t_var)) {
pr_debug("Non-VAR type seen in section %s\n", name);
return -EINVAL;
}
@@ -596,7 +546,8 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
ret = bpf_object__variable_offset(obj, name, &off);
if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n", name);
+ pr_debug("No offset found in symbol table for VAR %s\n",
+ name);
return -ENOENT;
}
@@ -620,7 +571,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
* is section size and global variable offset. We use
* the info from the ELF itself for this purpose.
*/
- if (BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC) {
+ if (btf_is_datasec(t)) {
err = btf_fixup_datasec(obj, btf, t);
if (err)
break;
@@ -775,14 +726,13 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
return -EINVAL;
}
- if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
- BTF_INFO_VLEN(container_type->info) < 2) {
+ if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
pr_warning("map:%s container_name:%s is an invalid container struct\n",
map_name, container_name);
return -EINVAL;
}
- key = (struct btf_member *)(container_type + 1);
+ key = btf_members(container_type);
value = key + 1;
key_size = btf__resolve_size(btf, key->type);
@@ -832,6 +782,9 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
/* The start of the info sec (including the __u32 record_size). */
void *info;
+ if (ext_sec->len == 0)
+ return 0;
+
if (ext_sec->off & 0x03) {
pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
ext_sec->desc);
@@ -935,11 +888,24 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
return btf_ext_setup_info(btf_ext, &param);
}
+static int btf_ext_setup_offset_reloc(struct btf_ext *btf_ext)
+{
+ struct btf_ext_sec_setup_param param = {
+ .off = btf_ext->hdr->offset_reloc_off,
+ .len = btf_ext->hdr->offset_reloc_len,
+ .min_rec_size = sizeof(struct bpf_offset_reloc),
+ .ext_info = &btf_ext->offset_reloc_info,
+ .desc = "offset_reloc",
+ };
+
+ return btf_ext_setup_info(btf_ext, &param);
+}
+
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
- if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
+ if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
data_size < hdr->hdr_len) {
pr_debug("BTF.ext header not found");
return -EINVAL;
@@ -997,6 +963,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
+ if (btf_ext->hdr->hdr_len <
+ offsetofend(struct btf_ext_header, line_info_len))
+ goto done;
err = btf_ext_setup_func_info(btf_ext);
if (err)
goto done;
@@ -1005,6 +974,13 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (err)
goto done;
+ if (btf_ext->hdr->hdr_len <
+ offsetofend(struct btf_ext_header, offset_reloc_len))
+ goto done;
+ err = btf_ext_setup_offset_reloc(btf_ext);
+ if (err)
+ goto done;
+
done:
if (err) {
btf_ext__free(btf_ext);
@@ -1441,10 +1417,9 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
d->map[0] = 0;
for (i = 1; i <= btf->nr_types; i++) {
struct btf_type *t = d->btf->types[i];
- __u16 kind = BTF_INFO_KIND(t->info);
/* VAR and DATASEC are never deduped and are self-canonical */
- if (kind == BTF_KIND_VAR || kind == BTF_KIND_DATASEC)
+ if (btf_is_var(t) || btf_is_datasec(t))
d->map[i] = i;
else
d->map[i] = BTF_UNPROCESSED_ID;
@@ -1485,11 +1460,11 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
if (r)
return r;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *m = (struct btf_member *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1500,8 +1475,8 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
break;
}
case BTF_KIND_ENUM: {
- struct btf_enum *m = (struct btf_enum *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_enum *m = btf_enum(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1512,8 +1487,8 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
break;
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_param *m = btf_params(t);
+ __u16 vlen = btf_vlen(t);
for (j = 0; j < vlen; j++) {
r = fn(&m->name_off, ctx);
@@ -1802,16 +1777,16 @@ static long btf_hash_enum(struct btf_type *t)
/* Check structural equality of two ENUMs. */
static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_enum *m1, *m2;
+ const struct btf_enum *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_enum *)(t1 + 1);
- m2 = (struct btf_enum *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_enum(t1);
+ m2 = btf_enum(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->val != m2->val)
return false;
@@ -1823,8 +1798,7 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
- return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
- BTF_INFO_VLEN(t->info) == 0;
+ return btf_is_enum(t) && btf_vlen(t) == 0;
}
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
@@ -1844,8 +1818,8 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
*/
static long btf_hash_struct(struct btf_type *t)
{
- struct btf_member *member = (struct btf_member *)(t + 1);
- __u32 vlen = BTF_INFO_VLEN(t->info);
+ const struct btf_member *member = btf_members(t);
+ __u32 vlen = btf_vlen(t);
long h = btf_hash_common(t);
int i;
@@ -1865,16 +1839,16 @@ static long btf_hash_struct(struct btf_type *t)
*/
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_member *m1, *m2;
+ const struct btf_member *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_member *)(t1 + 1);
- m2 = (struct btf_member *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->offset != m2->offset)
return false;
@@ -1891,7 +1865,7 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
*/
static long btf_hash_array(struct btf_type *t)
{
- struct btf_array *info = (struct btf_array *)(t + 1);
+ const struct btf_array *info = btf_array(t);
long h = btf_hash_common(t);
h = hash_combine(h, info->type);
@@ -1909,13 +1883,13 @@ static long btf_hash_array(struct btf_type *t)
*/
static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_array *info1, *info2;
+ const struct btf_array *info1, *info2;
if (!btf_equal_common(t1, t2))
return false;
- info1 = (struct btf_array *)(t1 + 1);
- info2 = (struct btf_array *)(t2 + 1);
+ info1 = btf_array(t1);
+ info2 = btf_array(t2);
return info1->type == info2->type &&
info1->index_type == info2->index_type &&
info1->nelems == info2->nelems;
@@ -1928,14 +1902,10 @@ static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
*/
static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_array *info1, *info2;
-
if (!btf_equal_common(t1, t2))
return false;
- info1 = (struct btf_array *)(t1 + 1);
- info2 = (struct btf_array *)(t2 + 1);
- return info1->nelems == info2->nelems;
+ return btf_array(t1)->nelems == btf_array(t2)->nelems;
}
/*
@@ -1945,8 +1915,8 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
*/
static long btf_hash_fnproto(struct btf_type *t)
{
- struct btf_param *member = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ const struct btf_param *member = btf_params(t);
+ __u16 vlen = btf_vlen(t);
long h = btf_hash_common(t);
int i;
@@ -1967,16 +1937,16 @@ static long btf_hash_fnproto(struct btf_type *t)
*/
static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_param *m1, *m2;
+ const struct btf_param *m1, *m2;
__u16 vlen;
int i;
if (!btf_equal_common(t1, t2))
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_param *)(t1 + 1);
- m2 = (struct btf_param *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off || m1->type != m2->type)
return false;
@@ -1993,7 +1963,7 @@ static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
*/
static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
{
- struct btf_param *m1, *m2;
+ const struct btf_param *m1, *m2;
__u16 vlen;
int i;
@@ -2001,9 +1971,9 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
if (t1->name_off != t2->name_off || t1->info != t2->info)
return false;
- vlen = BTF_INFO_VLEN(t1->info);
- m1 = (struct btf_param *)(t1 + 1);
- m2 = (struct btf_param *)(t2 + 1);
+ vlen = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
for (i = 0; i < vlen; i++) {
if (m1->name_off != m2->name_off)
return false;
@@ -2029,7 +1999,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
__u32 cand_id;
long h;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
@@ -2142,13 +2112,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
__u32 orig_type_id = type_id;
- if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ if (!btf_is_fwd(d->btf->types[type_id]))
return type_id;
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
type_id = d->map[type_id];
- if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
+ if (!btf_is_fwd(d->btf->types[type_id]))
return type_id;
return orig_type_id;
@@ -2157,7 +2127,7 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
static inline __u16 btf_fwd_kind(struct btf_type *t)
{
- return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
+ return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
/*
@@ -2278,8 +2248,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
cand_type = d->btf->types[cand_id];
canon_type = d->btf->types[canon_id];
- cand_kind = BTF_INFO_KIND(cand_type->info);
- canon_kind = BTF_INFO_KIND(canon_type->info);
+ cand_kind = btf_kind(cand_type);
+ canon_kind = btf_kind(canon_type);
if (cand_type->name_off != canon_type->name_off)
return 0;
@@ -2328,12 +2298,12 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
case BTF_KIND_ARRAY: {
- struct btf_array *cand_arr, *canon_arr;
+ const struct btf_array *cand_arr, *canon_arr;
if (!btf_compat_array(cand_type, canon_type))
return 0;
- cand_arr = (struct btf_array *)(cand_type + 1);
- canon_arr = (struct btf_array *)(canon_type + 1);
+ cand_arr = btf_array(cand_type);
+ canon_arr = btf_array(canon_type);
eq = btf_dedup_is_equiv(d,
cand_arr->index_type, canon_arr->index_type);
if (eq <= 0)
@@ -2343,14 +2313,14 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *cand_m, *canon_m;
+ const struct btf_member *cand_m, *canon_m;
__u16 vlen;
if (!btf_shallow_equal_struct(cand_type, canon_type))
return 0;
- vlen = BTF_INFO_VLEN(cand_type->info);
- cand_m = (struct btf_member *)(cand_type + 1);
- canon_m = (struct btf_member *)(canon_type + 1);
+ vlen = btf_vlen(cand_type);
+ cand_m = btf_members(cand_type);
+ canon_m = btf_members(canon_type);
for (i = 0; i < vlen; i++) {
eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
if (eq <= 0)
@@ -2363,7 +2333,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *cand_p, *canon_p;
+ const struct btf_param *cand_p, *canon_p;
__u16 vlen;
if (!btf_compat_fnproto(cand_type, canon_type))
@@ -2371,9 +2341,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
if (eq <= 0)
return eq;
- vlen = BTF_INFO_VLEN(cand_type->info);
- cand_p = (struct btf_param *)(cand_type + 1);
- canon_p = (struct btf_param *)(canon_type + 1);
+ vlen = btf_vlen(cand_type);
+ cand_p = btf_params(cand_type);
+ canon_p = btf_params(canon_type);
for (i = 0; i < vlen; i++) {
eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
if (eq <= 0)
@@ -2428,8 +2398,8 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
targ_type_id = d->hypot_map[cand_type_id];
t_id = resolve_type_id(d, targ_type_id);
c_id = resolve_type_id(d, cand_type_id);
- t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
- c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
+ t_kind = btf_kind(d->btf->types[t_id]);
+ c_kind = btf_kind(d->btf->types[c_id]);
/*
* Resolve FWD into STRUCT/UNION.
* It's ok to resolve FWD into STRUCT/UNION that's not yet
@@ -2498,7 +2468,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
return 0;
t = d->btf->types[type_id];
- kind = BTF_INFO_KIND(t->info);
+ kind = btf_kind(t);
if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
return 0;
@@ -2593,7 +2563,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
t = d->btf->types[type_id];
d->map[type_id] = BTF_IN_PROGRESS_ID;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_CONST:
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
@@ -2617,7 +2587,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_ARRAY: {
- struct btf_array *info = (struct btf_array *)(t + 1);
+ struct btf_array *info = btf_array(t);
ref_type_id = btf_dedup_ref_type(d, info->type);
if (ref_type_id < 0)
@@ -2651,8 +2621,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
return ref_type_id;
t->type = ref_type_id;
- vlen = BTF_INFO_VLEN(t->info);
- param = (struct btf_param *)(t + 1);
+ vlen = btf_vlen(t);
+ param = btf_params(t);
for (i = 0; i < vlen; i++) {
ref_type_id = btf_dedup_ref_type(d, param->type);
if (ref_type_id < 0)
@@ -2792,7 +2762,7 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
struct btf_type *t = d->btf->types[type_id];
int i, r;
- switch (BTF_INFO_KIND(t->info)) {
+ switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
break;
@@ -2812,7 +2782,7 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_ARRAY: {
- struct btf_array *arr_info = (struct btf_array *)(t + 1);
+ struct btf_array *arr_info = btf_array(t);
r = btf_dedup_remap_type_id(d, arr_info->type);
if (r < 0)
@@ -2827,8 +2797,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *member = (struct btf_member *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_member *member = btf_members(t);
+ __u16 vlen = btf_vlen(t);
for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, member->type);
@@ -2841,8 +2811,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *param = (struct btf_param *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_param *param = btf_params(t);
+ __u16 vlen = btf_vlen(t);
r = btf_dedup_remap_type_id(d, t->type);
if (r < 0)
@@ -2860,8 +2830,8 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
}
case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *var = (struct btf_var_secinfo *)(t + 1);
- __u16 vlen = BTF_INFO_VLEN(t->info);
+ struct btf_var_secinfo *var = btf_var_secinfos(t);
+ __u16 vlen = btf_vlen(t);
for (i = 0; i < vlen; i++) {
r = btf_dedup_remap_type_id(d, var->type);
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 88a52ae56fc6..9cb44b4fbf60 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -5,6 +5,7 @@
#define __LIBBPF_BTF_H
#include <stdarg.h>
+#include <linux/btf.h>
#include <linux/types.h>
#ifdef __cplusplus
@@ -57,6 +58,10 @@ struct btf_ext_header {
__u32 func_info_len;
__u32 line_info_off;
__u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 offset_reloc_off;
+ __u32 offset_reloc_len;
};
LIBBPF_API void btf__free(struct btf *btf);
@@ -120,6 +125,183 @@ LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
+/*
+ * A set of helpers for easier BTF types handling
+ */
+static inline __u16 btf_kind(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info);
+}
+
+static inline __u16 btf_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline bool btf_is_int(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_INT;
+}
+
+static inline bool btf_is_ptr(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_PTR;
+}
+
+static inline bool btf_is_array(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ARRAY;
+}
+
+static inline bool btf_is_struct(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_STRUCT;
+}
+
+static inline bool btf_is_union(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_composite(const struct btf_type *t)
+{
+ __u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
+static inline bool btf_is_enum(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_is_fwd(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FWD;
+}
+
+static inline bool btf_is_typedef(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_is_volatile(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_VOLATILE;
+}
+
+static inline bool btf_is_const(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_CONST;
+}
+
+static inline bool btf_is_restrict(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_RESTRICT;
+}
+
+static inline bool btf_is_mod(const struct btf_type *t)
+{
+ __u16 kind = btf_kind(t);
+
+ return kind == BTF_KIND_VOLATILE ||
+ kind == BTF_KIND_CONST ||
+ kind == BTF_KIND_RESTRICT;
+}
+
+static inline bool btf_is_func(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_is_func_proto(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FUNC_PROTO;
+}
+
+static inline bool btf_is_var(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_VAR;
+}
+
+static inline bool btf_is_datasec(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_DATASEC;
+}
+
+static inline __u8 btf_int_encoding(const struct btf_type *t)
+{
+ return BTF_INT_ENCODING(*(__u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_offset(const struct btf_type *t)
+{
+ return BTF_INT_OFFSET(*(__u32 *)(t + 1));
+}
+
+static inline __u8 btf_int_bits(const struct btf_type *t)
+{
+ return BTF_INT_BITS(*(__u32 *)(t + 1));
+}
+
+static inline struct btf_array *btf_array(const struct btf_type *t)
+{
+ return (struct btf_array *)(t + 1);
+}
+
+static inline struct btf_enum *btf_enum(const struct btf_type *t)
+{
+ return (struct btf_enum *)(t + 1);
+}
+
+static inline struct btf_member *btf_members(const struct btf_type *t)
+{
+ return (struct btf_member *)(t + 1);
+}
+
+/* Get bit offset of a member with specified index. */
+static inline __u32 btf_member_bit_offset(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
+}
+/*
+ * Get bitfield size of a member, assuming t is BTF_KIND_STRUCT or
+ * BTF_KIND_UNION. If member is not a bitfield, zero is returned.
+ */
+static inline __u32 btf_member_bitfield_size(const struct btf_type *t,
+ __u32 member_idx)
+{
+ const struct btf_member *m = btf_members(t) + member_idx;
+ bool kflag = btf_kflag(t);
+
+ return kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+}
+
+static inline struct btf_param *btf_params(const struct btf_type *t)
+{
+ return (struct btf_param *)(t + 1);
+}
+
+static inline struct btf_var *btf_var(const struct btf_type *t)
+{
+ return (struct btf_var *)(t + 1);
+}
+
+static inline struct btf_var_secinfo *
+btf_var_secinfos(const struct btf_type *t)
+{
+ return (struct btf_var_secinfo *)(t + 1);
+}
+
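
These inline helpers replace the open-coded BTF_INFO_*() macros and (t + 1) pointer casts used throughout libbpf. A sketch of how they compose (not part of the patch; assumes <stdio.h>), printing the members of a composite type:

	static void dump_composite(const struct btf *btf,
				   const struct btf_type *t)
	{
		const struct btf_member *m = btf_members(t);
		__u16 i, n = btf_vlen(t);

		if (!btf_is_composite(t))
			return;
		for (i = 0; i < n; i++, m++)
			printf("%s: bit offset %u, bitfield size %u\n",
			       btf__name_by_offset(btf, m->name_off),
			       btf_member_bit_offset(t, i),
			       btf_member_bitfield_size(t, i));
	}
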
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 7065bb5b2752..715967762312 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -100,21 +100,6 @@ static bool str_equal_fn(const void *a, const void *b, void *ctx)
return strcmp(a, b) == 0;
}
-static __u16 btf_kind_of(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info);
-}
-
-static __u16 btf_vlen_of(const struct btf_type *t)
-{
- return BTF_INFO_VLEN(t->info);
-}
-
-static bool btf_kflag_of(const struct btf_type *t)
-{
- return BTF_INFO_KFLAG(t->info);
-}
-
static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
{
return btf__name_by_offset(d->btf, name_off);
@@ -349,7 +334,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
*/
struct btf_dump_type_aux_state *tstate = &d->type_states[id];
const struct btf_type *t;
- __u16 kind, vlen;
+ __u16 vlen;
int err, i;
/* return true, letting typedefs know that it's ok to be emitted */
@@ -357,18 +342,16 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return 1;
t = btf__type_by_id(d->btf, id);
- kind = btf_kind_of(t);
if (tstate->order_state == ORDERING) {
/* type loop, but resolvable through fwd declaration */
- if ((kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION) &&
- through_ptr && t->name_off != 0)
+ if (btf_is_composite(t) && through_ptr && t->name_off != 0)
return 0;
pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
return -ELOOP;
}
- switch (kind) {
+ switch (btf_kind(t)) {
case BTF_KIND_INT:
tstate->order_state = ORDERED;
return 0;
@@ -378,14 +361,12 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
tstate->order_state = ORDERED;
return err;
- case BTF_KIND_ARRAY: {
- const struct btf_array *a = (void *)(t + 1);
+ case BTF_KIND_ARRAY:
+ return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
- return btf_dump_order_type(d, a->type, through_ptr);
- }
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- const struct btf_member *m = (void *)(t + 1);
+ const struct btf_member *m = btf_members(t);
/*
* struct/union is part of strong link, only if it's embedded
* (so no ptr in a path) or it's anonymous (so has to be
@@ -396,7 +377,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
tstate->order_state = ORDERING;
- vlen = btf_vlen_of(t);
+ vlen = btf_vlen(t);
for (i = 0; i < vlen; i++, m++) {
err = btf_dump_order_type(d, m->type, false);
if (err < 0)
@@ -447,7 +428,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return btf_dump_order_type(d, t->type, through_ptr);
case BTF_KIND_FUNC_PROTO: {
- const struct btf_param *p = (void *)(t + 1);
+ const struct btf_param *p = btf_params(t);
bool is_strong;
err = btf_dump_order_type(d, t->type, through_ptr);
@@ -455,7 +436,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return err;
is_strong = err > 0;
- vlen = btf_vlen_of(t);
+ vlen = btf_vlen(t);
for (i = 0; i < vlen; i++, p++) {
err = btf_dump_order_type(d, p->type, through_ptr);
if (err < 0)
@@ -553,7 +534,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
return;
t = btf__type_by_id(d->btf, id);
- kind = btf_kind_of(t);
+ kind = btf_kind(t);
if (top_level_def && t->name_off == 0) {
pr_warning("unexpected nameless definition, id:[%u]\n", id);
@@ -618,12 +599,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
case BTF_KIND_RESTRICT:
btf_dump_emit_type(d, t->type, cont_id);
break;
- case BTF_KIND_ARRAY: {
- const struct btf_array *a = (void *)(t + 1);
-
- btf_dump_emit_type(d, a->type, cont_id);
+ case BTF_KIND_ARRAY:
+ btf_dump_emit_type(d, btf_array(t)->type, cont_id);
break;
- }
case BTF_KIND_FWD:
btf_dump_emit_fwd_def(d, id, t);
btf_dump_printf(d, ";\n\n");
@@ -656,8 +634,8 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
* applicable
*/
if (top_level_def || t->name_off == 0) {
- const struct btf_member *m = (void *)(t + 1);
- __u16 vlen = btf_vlen_of(t);
+ const struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
int i, new_cont_id;
new_cont_id = t->name_off == 0 ? cont_id : id;
@@ -678,8 +656,8 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
}
break;
case BTF_KIND_FUNC_PROTO: {
- const struct btf_param *p = (void *)(t + 1);
- __u16 vlen = btf_vlen_of(t);
+ const struct btf_param *p = btf_params(t);
+ __u16 vlen = btf_vlen(t);
int i;
btf_dump_emit_type(d, t->type, cont_id);
@@ -696,7 +674,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
static int btf_align_of(const struct btf *btf, __u32 id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
- __u16 kind = btf_kind_of(t);
+ __u16 kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
@@ -709,15 +687,12 @@ static int btf_align_of(const struct btf *btf, __u32 id)
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
return btf_align_of(btf, t->type);
- case BTF_KIND_ARRAY: {
- const struct btf_array *a = (void *)(t + 1);
-
- return btf_align_of(btf, a->type);
- }
+ case BTF_KIND_ARRAY:
+ return btf_align_of(btf, btf_array(t)->type);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- const struct btf_member *m = (void *)(t + 1);
- __u16 vlen = btf_vlen_of(t);
+ const struct btf_member *m = btf_members(t);
+ __u16 vlen = btf_vlen(t);
int i, align = 1;
for (i = 0; i < vlen; i++, m++)
@@ -726,7 +701,7 @@ static int btf_align_of(const struct btf *btf, __u32 id)
return align;
}
default:
- pr_warning("unsupported BTF_KIND:%u\n", btf_kind_of(t));
+ pr_warning("unsupported BTF_KIND:%u\n", btf_kind(t));
return 1;
}
}
@@ -737,20 +712,18 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
const struct btf_member *m;
int align, i, bit_sz;
__u16 vlen;
- bool kflag;
align = btf_align_of(btf, id);
/* size of a non-packed struct has to be a multiple of its alignment*/
if (t->size % align)
return true;
- m = (void *)(t + 1);
- kflag = btf_kflag_of(t);
- vlen = btf_vlen_of(t);
+ m = btf_members(t);
+ vlen = btf_vlen(t);
/* all non-bitfield fields have to be naturally aligned */
for (i = 0; i < vlen; i++, m++) {
align = btf_align_of(btf, m->type);
- bit_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+ bit_sz = btf_member_bitfield_size(t, i);
if (bit_sz == 0 && m->offset % (8 * align) != 0)
return true;
}
@@ -807,7 +780,7 @@ static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
btf_dump_printf(d, "%s %s",
- btf_kind_of(t) == BTF_KIND_STRUCT ? "struct" : "union",
+ btf_is_struct(t) ? "struct" : "union",
btf_dump_type_name(d, id));
}
@@ -816,12 +789,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
const struct btf_type *t,
int lvl)
{
- const struct btf_member *m = (void *)(t + 1);
- bool kflag = btf_kflag_of(t), is_struct;
+ const struct btf_member *m = btf_members(t);
+ bool is_struct = btf_is_struct(t);
int align, i, packed, off = 0;
- __u16 vlen = btf_vlen_of(t);
+ __u16 vlen = btf_vlen(t);
- is_struct = btf_kind_of(t) == BTF_KIND_STRUCT;
packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
align = packed ? 1 : btf_align_of(d->btf, id);
@@ -835,8 +807,8 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
int m_off, m_sz;
fname = btf_name_of(d, m->name_off);
- m_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
- m_off = kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
+ m_sz = btf_member_bitfield_size(t, i);
+ m_off = btf_member_bit_offset(t, i);
align = packed ? 1 : btf_align_of(d->btf, m->type);
btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
@@ -870,8 +842,8 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
const struct btf_type *t,
int lvl)
{
- const struct btf_enum *v = (void *)(t+1);
- __u16 vlen = btf_vlen_of(t);
+ const struct btf_enum *v = btf_enum(t);
+ __u16 vlen = btf_vlen(t);
const char *name;
size_t dup_cnt;
int i;
@@ -905,7 +877,7 @@ static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
{
const char *name = btf_dump_type_name(d, id);
- if (btf_kflag_of(t))
+ if (btf_kflag(t))
btf_dump_printf(d, "union %s", name);
else
btf_dump_printf(d, "struct %s", name);
@@ -987,7 +959,6 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
struct id_stack decl_stack;
const struct btf_type *t;
int err, stack_start;
- __u16 kind;
stack_start = d->decl_stack_cnt;
for (;;) {
@@ -1008,8 +979,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
break;
t = btf__type_by_id(d->btf, id);
- kind = btf_kind_of(t);
- switch (kind) {
+ switch (btf_kind(t)) {
case BTF_KIND_PTR:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
@@ -1017,12 +987,9 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
case BTF_KIND_FUNC_PROTO:
id = t->type;
break;
- case BTF_KIND_ARRAY: {
- const struct btf_array *a = (void *)(t + 1);
-
- id = a->type;
+ case BTF_KIND_ARRAY:
+ id = btf_array(t)->type;
break;
- }
case BTF_KIND_INT:
case BTF_KIND_ENUM:
case BTF_KIND_FWD:
@@ -1032,7 +999,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
goto done;
default:
pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
- kind, id);
+ btf_kind(t), id);
goto done;
}
}
@@ -1070,7 +1037,7 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
id = decl_stack->ids[decl_stack->cnt - 1];
t = btf__type_by_id(d->btf, id);
- switch (btf_kind_of(t)) {
+ switch (btf_kind(t)) {
case BTF_KIND_VOLATILE:
btf_dump_printf(d, "volatile ");
break;
@@ -1087,20 +1054,6 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
}
}
-static bool btf_is_mod_kind(const struct btf *btf, __u32 id)
-{
- const struct btf_type *t = btf__type_by_id(btf, id);
-
- switch (btf_kind_of(t)) {
- case BTF_KIND_VOLATILE:
- case BTF_KIND_CONST:
- case BTF_KIND_RESTRICT:
- return true;
- default:
- return false;
- }
-}
-
static void btf_dump_emit_name(const struct btf_dump *d,
const char *name, bool last_was_ptr)
{
@@ -1139,7 +1092,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
}
t = btf__type_by_id(d->btf, id);
- kind = btf_kind_of(t);
+ kind = btf_kind(t);
switch (kind) {
case BTF_KIND_INT:
@@ -1185,7 +1138,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
btf_dump_printf(d, " restrict");
break;
case BTF_KIND_ARRAY: {
- const struct btf_array *a = (void *)(t + 1);
+ const struct btf_array *a = btf_array(t);
const struct btf_type *next_t;
__u32 next_id;
bool multidim;
@@ -1201,7 +1154,8 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
*/
while (decls->cnt) {
next_id = decls->ids[decls->cnt - 1];
- if (btf_is_mod_kind(d->btf, next_id))
+ next_t = btf__type_by_id(d->btf, next_id);
+ if (btf_is_mod(next_t))
decls->cnt--;
else
break;
@@ -1214,7 +1168,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
}
next_t = btf__type_by_id(d->btf, next_id);
- multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY;
+ multidim = btf_is_array(next_t);
/* we need space if we have named non-pointer */
if (fname[0] && !last_was_ptr)
btf_dump_printf(d, " ");
@@ -1228,8 +1182,8 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
return;
}
case BTF_KIND_FUNC_PROTO: {
- const struct btf_param *p = (void *)(t + 1);
- __u16 vlen = btf_vlen_of(t);
+ const struct btf_param *p = btf_params(t);
+ __u16 vlen = btf_vlen(t);
int i;
btf_dump_emit_mods(d, decls);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2b57d7ea7836..e0276520171b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -39,6 +39,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
+#include <sys/utsname.h>
#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
@@ -48,6 +49,7 @@
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
+#include "hashmap.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -75,9 +77,12 @@ static int __base_pr(enum libbpf_print_level level, const char *format,
static libbpf_print_fn_t __libbpf_pr = __base_pr;
-void libbpf_set_print(libbpf_print_fn_t fn)
+libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
+ libbpf_print_fn_t old_print_fn = __libbpf_pr;
+
__libbpf_pr = fn;
+ return old_print_fn;
}
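
Returning the previous callback lets callers override printing temporarily and restore it afterwards. A usage sketch (not part of the patch; passing NULL disables output, since the print wrapper bails out on a NULL handler):

	libbpf_print_fn_t old_fn;

	old_fn = libbpf_set_print(NULL);	/* silence libbpf */
	/* ... noisy feature probing ... */
	libbpf_set_print(old_fn);		/* restore previous handler */
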
__printf(2, 3)
@@ -1010,23 +1015,21 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return 0;
}
-static const struct btf_type *skip_mods_and_typedefs(const struct btf *btf,
- __u32 id)
+static const struct btf_type *
+skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
- while (true) {
- switch (BTF_INFO_KIND(t->info)) {
- case BTF_KIND_VOLATILE:
- case BTF_KIND_CONST:
- case BTF_KIND_RESTRICT:
- case BTF_KIND_TYPEDEF:
- t = btf__type_by_id(btf, t->type);
- break;
- default:
- return t;
- }
+ if (res_id)
+ *res_id = id;
+
+ while (btf_is_mod(t) || btf_is_typedef(t)) {
+ if (res_id)
+ *res_id = t->type;
+ t = btf__type_by_id(btf, t->type);
}
+
+ return t;
}
/*
@@ -1039,14 +1042,14 @@ static const struct btf_type *skip_mods_and_typedefs(const struct btf *btf,
static bool get_map_field_int(const char *map_name, const struct btf *btf,
const struct btf_type *def,
const struct btf_member *m, __u32 *res) {
- const struct btf_type *t = skip_mods_and_typedefs(btf, m->type);
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
const char *name = btf__name_by_offset(btf, m->name_off);
const struct btf_array *arr_info;
const struct btf_type *arr_t;
- if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
+ if (!btf_is_ptr(t)) {
pr_warning("map '%s': attr '%s': expected PTR, got %u.\n",
- map_name, name, BTF_INFO_KIND(t->info));
+ map_name, name, btf_kind(t));
return false;
}
@@ -1056,12 +1059,12 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
map_name, name, t->type);
return false;
}
- if (BTF_INFO_KIND(arr_t->info) != BTF_KIND_ARRAY) {
+ if (!btf_is_array(arr_t)) {
pr_warning("map '%s': attr '%s': expected ARRAY, got %u.\n",
- map_name, name, BTF_INFO_KIND(arr_t->info));
+ map_name, name, btf_kind(arr_t));
return false;
}
- arr_info = (const void *)(arr_t + 1);
+ arr_info = btf_array(arr_t);
*res = arr_info->nelems;
return true;
}
@@ -1079,11 +1082,11 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
struct bpf_map *map;
int vlen, i;
- vi = (const struct btf_var_secinfo *)(const void *)(sec + 1) + var_idx;
+ vi = btf_var_secinfos(sec) + var_idx;
var = btf__type_by_id(obj->btf, vi->type);
- var_extra = (const void *)(var + 1);
+ var_extra = btf_var(var);
map_name = btf__name_by_offset(obj->btf, var->name_off);
- vlen = BTF_INFO_VLEN(var->info);
+ vlen = btf_vlen(var);
if (map_name == NULL || map_name[0] == '\0') {
pr_warning("map #%d: empty name.\n", var_idx);
@@ -1093,9 +1096,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
pr_warning("map '%s' BTF data is corrupted.\n", map_name);
return -EINVAL;
}
- if (BTF_INFO_KIND(var->info) != BTF_KIND_VAR) {
+ if (!btf_is_var(var)) {
pr_warning("map '%s': unexpected var kind %u.\n",
- map_name, BTF_INFO_KIND(var->info));
+ map_name, btf_kind(var));
return -EINVAL;
}
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
@@ -1105,10 +1108,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return -EOPNOTSUPP;
}
- def = skip_mods_and_typedefs(obj->btf, var->type);
- if (BTF_INFO_KIND(def->info) != BTF_KIND_STRUCT) {
+ def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
+ if (!btf_is_struct(def)) {
pr_warning("map '%s': unexpected def kind %u.\n",
- map_name, BTF_INFO_KIND(var->info));
+ map_name, btf_kind(var));
return -EINVAL;
}
if (def->size > vi->size) {
@@ -1131,8 +1134,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
map_name, map->sec_idx, map->sec_offset);
- vlen = BTF_INFO_VLEN(def->info);
- m = (const void *)(def + 1);
+ vlen = btf_vlen(def);
+ m = btf_members(def);
for (i = 0; i < vlen; i++, m++) {
const char *name = btf__name_by_offset(obj->btf, m->name_off);
@@ -1182,9 +1185,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map_name, m->type);
return -EINVAL;
}
- if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
+ if (!btf_is_ptr(t)) {
pr_warning("map '%s': key spec is not PTR: %u.\n",
- map_name, BTF_INFO_KIND(t->info));
+ map_name, btf_kind(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
@@ -1225,9 +1228,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map_name, m->type);
return -EINVAL;
}
- if (BTF_INFO_KIND(t->info) != BTF_KIND_PTR) {
+ if (!btf_is_ptr(t)) {
pr_warning("map '%s': value spec is not PTR: %u.\n",
- map_name, BTF_INFO_KIND(t->info));
+ map_name, btf_kind(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
@@ -1288,7 +1291,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
nr_types = btf__get_nr_types(obj->btf);
for (i = 1; i <= nr_types; i++) {
t = btf__type_by_id(obj->btf, i);
- if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
+ if (!btf_is_datasec(t))
continue;
name = btf__name_by_offset(obj->btf, t->name_off);
if (strcmp(name, MAPS_ELF_SEC) == 0) {
@@ -1302,7 +1305,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict)
return -ENOENT;
}
- vlen = BTF_INFO_VLEN(sec->info);
+ vlen = btf_vlen(sec);
for (i = 0; i < vlen; i++) {
err = bpf_object__init_user_btf_map(obj, sec, i,
obj->efile.btf_maps_shndx,
@@ -1363,16 +1366,14 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
struct btf *btf = obj->btf;
struct btf_type *t;
int i, j, vlen;
- __u16 kind;
if (!obj->btf || (has_func && has_datasec))
return;
for (i = 1; i <= btf__get_nr_types(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
- kind = BTF_INFO_KIND(t->info);
- if (!has_datasec && kind == BTF_KIND_VAR) {
+ if (!has_datasec && btf_is_var(t)) {
/* replace VAR with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
/*
@@ -1381,11 +1382,11 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
* original variable took less than 4 bytes
*/
t->size = 1;
- *(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
- } else if (!has_datasec && kind == BTF_KIND_DATASEC) {
+ *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
+ } else if (!has_datasec && btf_is_datasec(t)) {
/* replace DATASEC with STRUCT */
- struct btf_var_secinfo *v = (void *)(t + 1);
- struct btf_member *m = (void *)(t + 1);
+ const struct btf_var_secinfo *v = btf_var_secinfos(t);
+ struct btf_member *m = btf_members(t);
struct btf_type *vt;
char *name;
@@ -1396,7 +1397,7 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
name++;
}
- vlen = BTF_INFO_VLEN(t->info);
+ vlen = btf_vlen(t);
t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
for (j = 0; j < vlen; j++, v++, m++) {
/* order of field assignments is important */
@@ -1406,12 +1407,12 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
vt = (void *)btf__type_by_id(btf, v->type);
m->name_off = vt->name_off;
}
- } else if (!has_func && kind == BTF_KIND_FUNC_PROTO) {
+ } else if (!has_func && btf_is_func_proto(t)) {
/* replace FUNC_PROTO with ENUM */
- vlen = BTF_INFO_VLEN(t->info);
+ vlen = btf_vlen(t);
t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
t->size = sizeof(__u32); /* kernel enforced */
- } else if (!has_func && kind == BTF_KIND_FUNC) {
+ } else if (!has_func && btf_is_func(t)) {
/* replace FUNC with TYPEDEF */
t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
}
@@ -1769,15 +1770,22 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
(long long) sym.st_value, sym.st_name, name);
shdr_idx = sym.st_shndx;
+ insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+ pr_debug("relocation: insn_idx=%u, shdr_idx=%u\n",
+ insn_idx, shdr_idx);
+
+ if (shdr_idx >= SHN_LORESERVE) {
+ pr_warning("relocation: not yet supported relo for non-static global \'%s\' variable in special section (0x%x) found in insns[%d].code 0x%x\n",
+ name, shdr_idx, insn_idx,
+ insns[insn_idx].code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
if (!bpf_object__relo_in_known_section(obj, shdr_idx)) {
pr_warning("Program '%s' contains unrecognized relo data pointing to section %u\n",
prog->section_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- pr_debug("relocation: insn_idx=%u\n", insn_idx);
-
if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
pr_warning("incorrect bpf_call opcode\n");
@@ -2288,6 +2296,894 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
return 0;
}
+#define BPF_CORE_SPEC_MAX_LEN 64
+
+/* represents BPF CO-RE field or array element accessor */
+struct bpf_core_accessor {
+ __u32 type_id; /* struct/union type or array element type */
+ __u32 idx; /* field index or array index */
+ const char *name; /* field name or NULL for array accessor */
+};
+
+struct bpf_core_spec {
+ const struct btf *btf;
+ /* high-level spec: named fields and array indices only */
+ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
+ /* high-level spec length */
+ int len;
+ /* raw, low-level spec: 1-to-1 with accessor spec string */
+ int raw_spec[BPF_CORE_SPEC_MAX_LEN];
+ /* raw spec length */
+ int raw_len;
+ /* field byte offset represented by spec */
+ __u32 offset;
+};
+
+static bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+/*
+ * Turn bpf_offset_reloc into a low- and high-level spec representation,
+ * validating correctness along the way, as well as calculating resulting
+ * field offset (in bytes), specified by accessor string. Low-level spec
+ * captures every single level of nestedness, including traversing anonymous
+ * struct/union members. High-level one only captures semantically meaningful
+ * "turning points": named fields and array indicies.
+ * E.g., for this case:
+ *
+ * struct sample {
+ * int __unimportant;
+ * struct {
+ * int __1;
+ * int __2;
+ * int a[7];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ *
+ * int *x = &s->a[3]; // access string = '0:1:2:3'
+ *
+ * Low-level spec has 1:1 mapping with each element of access string (it's
+ * just a parsed access string representation): [0, 1, 2, 3].
+ *
+ * High-level spec will capture only 3 points:
+ * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
+ * - field 'a' access (corresponds to '2' in low-level spec);
+ * - array element #3 access (corresponds to '3' in low-level spec).
+ *
+ */
+static int bpf_core_spec_parse(const struct btf *btf,
+ __u32 type_id,
+ const char *spec_str,
+ struct bpf_core_spec *spec)
+{
+ int access_idx, parsed_len, i;
+ const struct btf_type *t;
+ const char *name;
+ __u32 id;
+ __s64 sz;
+
+ if (str_is_empty(spec_str) || *spec_str == ':')
+ return -EINVAL;
+
+ memset(spec, 0, sizeof(*spec));
+ spec->btf = btf;
+
+ /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
+ while (*spec_str) {
+ if (*spec_str == ':')
+ ++spec_str;
+ if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
+ return -EINVAL;
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+ spec_str += parsed_len;
+ spec->raw_spec[spec->raw_len++] = access_idx;
+ }
+
+ if (spec->raw_len == 0)
+ return -EINVAL;
+
+ /* first spec value is always reloc type array index */
+ t = skip_mods_and_typedefs(btf, type_id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[0];
+ spec->spec[0].type_id = id;
+ spec->spec[0].idx = access_idx;
+ spec->len++;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->offset = access_idx * sz;
+
+ for (i = 1; i < spec->raw_len; i++) {
+ t = skip_mods_and_typedefs(btf, id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[i];
+
+ if (btf_is_composite(t)) {
+ const struct btf_member *m;
+ __u32 offset;
+
+ if (access_idx >= btf_vlen(t))
+ return -EINVAL;
+ if (btf_member_bitfield_size(t, access_idx))
+ return -EINVAL;
+
+ offset = btf_member_bit_offset(t, access_idx);
+ if (offset % 8)
+ return -EINVAL;
+ spec->offset += offset / 8;
+
+ m = btf_members(t) + access_idx;
+ if (m->name_off) {
+ name = btf__name_by_offset(btf, m->name_off);
+ if (str_is_empty(name))
+ return -EINVAL;
+
+ spec->spec[spec->len].type_id = id;
+ spec->spec[spec->len].idx = access_idx;
+ spec->spec[spec->len].name = name;
+ spec->len++;
+ }
+
+ id = m->type;
+ } else if (btf_is_array(t)) {
+ const struct btf_array *a = btf_array(t);
+
+ t = skip_mods_and_typedefs(btf, a->type, &id);
+ if (!t || access_idx >= a->nelems)
+ return -EINVAL;
+
+ spec->spec[spec->len].type_id = id;
+ spec->spec[spec->len].idx = access_idx;
+ spec->len++;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->offset += access_idx * sz;
+ } else {
+ pr_warning("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
+ type_id, spec_str, i, id, btf_kind(t));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
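
A worked example of the parse (a sketch, reusing struct sample from the comment above and assuming a 4-byte int): for spec_str "0:1:2:3",

	/* raw_spec = [0, 1, 2, 3], raw_len = 4
	 * spec[0]: initial s[0] access  -> offset  = 0 * sizeof(struct sample)
	 * raw idx 1: anonymous struct   -> offset += 4 (no high-level entry:
	 *                                  the member is unnamed)
	 * spec[1]: field 'a'            -> offset += 8 (past __1 and __2)
	 * spec[2]: array element #3     -> offset += 3 * 4
	 * result: spec->len = 3, spec->offset = 24 bytes
	 */
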
+
+static bool bpf_core_is_flavor_sep(const char *s)
+{
+ /* check X___Y name pattern, where X and Y are not underscores */
+ return s[0] != '_' && /* X */
+ s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
+ s[4] != '_'; /* Y */
+}
+
+/* Given 'some_struct_name___with_flavor' return the length of a name prefix
+ * before last triple underscore. Struct name part after last triple
+ * underscore is ignored by BPF CO-RE relocation during relocation matching.
+ */
+static size_t bpf_core_essential_name_len(const char *name)
+{
+ size_t n = strlen(name);
+ int i;
+
+ for (i = n - 5; i >= 0; i--) {
+ if (bpf_core_is_flavor_sep(name + i))
+ return i + 1;
+ }
+ return n;
+}
+
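For example (sketch, hypothetical type names):

	/* "task_struct___flavored" -> essential length 11 ("task_struct"),
	 *                             so it matches a target "task_struct"
	 * "task_struct"            -> full length (no ___ separator found)
	 */
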
+/* dynamically sized list of type IDs */
+struct ids_vec {
+ __u32 *data;
+ int len;
+};
+
+static void bpf_core_free_cands(struct ids_vec *cand_ids)
+{
+ free(cand_ids->data);
+ free(cand_ids);
+}
+
+static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
+ __u32 local_type_id,
+ const struct btf *targ_btf)
+{
+ size_t local_essent_len, targ_essent_len;
+ const char *local_name, *targ_name;
+ const struct btf_type *t;
+ struct ids_vec *cand_ids;
+ __u32 *new_ids;
+ int i, err, n;
+
+ t = btf__type_by_id(local_btf, local_type_id);
+ if (!t)
+ return ERR_PTR(-EINVAL);
+
+ local_name = btf__name_by_offset(local_btf, t->name_off);
+ if (str_is_empty(local_name))
+ return ERR_PTR(-EINVAL);
+ local_essent_len = bpf_core_essential_name_len(local_name);
+
+ cand_ids = calloc(1, sizeof(*cand_ids));
+ if (!cand_ids)
+ return ERR_PTR(-ENOMEM);
+
+ n = btf__get_nr_types(targ_btf);
+ for (i = 1; i <= n; i++) {
+ t = btf__type_by_id(targ_btf, i);
+ targ_name = btf__name_by_offset(targ_btf, t->name_off);
+ if (str_is_empty(targ_name))
+ continue;
+
+ targ_essent_len = bpf_core_essential_name_len(targ_name);
+ if (targ_essent_len != local_essent_len)
+ continue;
+
+ if (strncmp(local_name, targ_name, local_essent_len) == 0) {
+ pr_debug("[%d] %s: found candidate [%d] %s\n",
+ local_type_id, local_name, i, targ_name);
+ new_ids = realloc(cand_ids->data, sizeof(*new_ids) * (cand_ids->len + 1));
+ if (!new_ids) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ cand_ids->data = new_ids;
+ cand_ids->data[cand_ids->len++] = i;
+ }
+ }
+ return cand_ids;
+err_out:
+ bpf_core_free_cands(cand_ids);
+ return ERR_PTR(err);
+}
+
+/* Check two types for compatibility, skipping const/volatile/restrict and
+ * typedefs, to ensure we are relocating an offset between compatible
+ * entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible;
+ * - any two PTRs are always compatible;
+ * - for ENUMs, sizes are checked, names are ignored;
+ * - for INT, size and bitness should match, signedness is ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ *   compatibility recursively;
+ * - everything else shouldn't ever be a target of relocation.
+ * These rules are not set in stone and will probably be adjusted as we gain
+ * more experience with using BPF CO-RE relocations.
+ */
+static int bpf_core_fields_are_compat(const struct btf *local_btf,
+ __u32 local_id,
+ const struct btf *targ_btf,
+ __u32 targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+
+recur:
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (btf_is_composite(local_type) && btf_is_composite(targ_type))
+ return 1;
+ if (btf_kind(local_type) != btf_kind(targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_PTR:
+ return 1;
+ case BTF_KIND_ENUM:
+ return local_type->size == targ_type->size;
+ case BTF_KIND_INT:
+ return btf_int_offset(local_type) == 0 &&
+ btf_int_offset(targ_type) == 0 &&
+ local_type->size == targ_type->size &&
+ btf_int_bits(local_type) == btf_int_bits(targ_type);
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ default:
+ pr_warning("unexpected kind %d relocated, local [%d], target [%d]\n",
+ btf_kind(local_type), local_id, targ_id);
+ return 0;
+ }
+}
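+
+/* A few illustrative outcomes under the rules above:
+ *   int vs const int   -> compatible (modifiers are skipped);
+ *   int[8] vs int[3]   -> compatible (only element types are compared);
+ *   int vs short       -> not compatible (sizes differ).
+ */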
+
+/*
+ * Given a single high-level named field accessor in a local type, find the
+ * corresponding high-level accessor for a target type. Along the way,
+ * maintain the low-level spec for the target as well. Also keep updating the
+ * target offset.
+ *
+ * Searching is performed through recursive exhaustive enumeration of all
+ * fields of a struct/union. If there are any anonymous (embedded)
+ * structs/unions, they are recursively searched as well. If a field with the
+ * desired name is found, local and target types are checked for
+ * compatibility before returning the result.
+ *
+ * 1 is returned if the field is found.
+ * 0 is returned if no compatible field is found.
+ * <0 is returned on error.
+ */
+static int bpf_core_match_member(const struct btf *local_btf,
+ const struct bpf_core_accessor *local_acc,
+ const struct btf *targ_btf,
+ __u32 targ_id,
+ struct bpf_core_spec *spec,
+ __u32 *next_targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+ const struct btf_member *local_member, *m;
+ const char *local_name, *targ_name;
+ __u32 local_id;
+ int i, n, found;
+
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+ if (!btf_is_composite(targ_type))
+ return 0;
+
+ local_id = local_acc->type_id;
+ local_type = btf__type_by_id(local_btf, local_id);
+ local_member = btf_members(local_type) + local_acc->idx;
+ local_name = btf__name_by_offset(local_btf, local_member->name_off);
+
+ n = btf_vlen(targ_type);
+ m = btf_members(targ_type);
+ for (i = 0; i < n; i++, m++) {
+ __u32 offset;
+
+ /* bitfield relocations not supported */
+ if (btf_member_bitfield_size(targ_type, i))
+ continue;
+ offset = btf_member_bit_offset(targ_type, i);
+ if (offset % 8)
+ continue;
+
+ /* too deep struct/union/array nesting */
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ /* speculate this member will be the good one */
+ spec->offset += offset / 8;
+ spec->raw_spec[spec->raw_len++] = i;
+
+ targ_name = btf__name_by_offset(targ_btf, m->name_off);
+ if (str_is_empty(targ_name)) {
+ /* embedded struct/union, we need to go deeper */
+ found = bpf_core_match_member(local_btf, local_acc,
+ targ_btf, m->type,
+ spec, next_targ_id);
+ if (found) /* either found or error */
+ return found;
+ } else if (strcmp(local_name, targ_name) == 0) {
+ /* matching named field */
+ struct bpf_core_accessor *targ_acc;
+
+ targ_acc = &spec->spec[spec->len++];
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = i;
+ targ_acc->name = targ_name;
+
+ *next_targ_id = m->type;
+ found = bpf_core_fields_are_compat(local_btf,
+ local_member->type,
+ targ_btf, m->type);
+ if (!found)
+ spec->len--; /* pop accessor */
+ return found;
+ }
+ /* member turned out not to be what we were looking for */
+ spec->offset -= offset / 8;
+ spec->raw_len--;
+ }
+
+ return 0;
+}
+
+/*
+ * Try to match a local spec to a target type and, if successful, produce a
+ * full target spec (high-level, low-level + offset).
+ */
+static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
+ const struct btf *targ_btf, __u32 targ_id,
+ struct bpf_core_spec *targ_spec)
+{
+ const struct btf_type *targ_type;
+ const struct bpf_core_accessor *local_acc;
+ struct bpf_core_accessor *targ_acc;
+ int i, sz, matched;
+
+ memset(targ_spec, 0, sizeof(*targ_spec));
+ targ_spec->btf = targ_btf;
+
+ local_acc = &local_spec->spec[0];
+ targ_acc = &targ_spec->spec[0];
+
+ for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
+ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
+ &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+
+ if (local_acc->name) {
+ matched = bpf_core_match_member(local_spec->btf,
+ local_acc,
+ targ_btf, targ_id,
+ targ_spec, &targ_id);
+ if (matched <= 0)
+ return matched;
+ } else {
+ /* for i=0, targ_id is already treated as the array element
+ * type (because it's the original struct); for others
+ * we should find the array element type first
+ */
+ if (i > 0) {
+ const struct btf_array *a;
+
+ if (!btf_is_array(targ_type))
+ return 0;
+
+ a = btf_array(targ_type);
+ if (local_acc->idx >= a->nelems)
+ return 0;
+ if (!skip_mods_and_typedefs(targ_btf, a->type,
+ &targ_id))
+ return -EINVAL;
+ }
+
+ /* too deep struct/union/array nesting */
+ if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = local_acc->idx;
+ targ_acc->name = NULL;
+ targ_spec->len++;
+ targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
+ targ_spec->raw_len++;
+
+ sz = btf__resolve_size(targ_btf, targ_id);
+ if (sz < 0)
+ return sz;
+ targ_spec->offset += local_acc->idx * sz;
+ }
+ }
+
+ return 1;
+}
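+
+/* Continuing the earlier illustration: matching the local spec for s->b[2]
+ * against a hypothetical flavored target
+ *
+ *   struct s___v2 { int pad; int a; struct { int b[8]; }; };
+ *
+ * succeeds with target raw_spec = [0, 2, 0, 2] and
+ * offset = 8 (skip pad and a) + 2 * sizeof(int) = 16 bytes.
+ */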
+
+/*
+ * Patch a relocatable BPF instruction.
+ *
+ * The expected insn->imm value is provided for validation, as well as the
+ * new relocated value.
+ *
+ * Currently only ALU/ALU64 instructions with an immediate (BPF_K) operand
+ * are supported:
+ * 1. rX = <imm> (assignment with immediate operand);
+ * 2. rX += <imm> (arithmetic operations with immediate operand).
+ *
+ * If the actual insn->imm value is wrong, bail out.
+ */
+static int bpf_core_reloc_insn(struct bpf_program *prog, int insn_off,
+ __u32 orig_off, __u32 new_off)
+{
+ struct bpf_insn *insn;
+ int insn_idx;
+ __u8 class;
+
+ if (insn_off % sizeof(struct bpf_insn))
+ return -EINVAL;
+ insn_idx = insn_off / sizeof(struct bpf_insn);
+
+ insn = &prog->insns[insn_idx];
+ class = BPF_CLASS(insn->code);
+
+ if (class == BPF_ALU || class == BPF_ALU64) {
+ if (BPF_SRC(insn->code) != BPF_K)
+ return -EINVAL;
+ if (insn->imm != orig_off)
+ return -EINVAL;
+ insn->imm = new_off;
+ pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
+ bpf_program__title(prog, false),
+ insn_idx, orig_off, new_off);
+ } else {
+ pr_warning("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
+ bpf_program__title(prog, false),
+ insn_idx, insn->code, insn->src_reg, insn->dst_reg,
+ insn->off, insn->imm);
+ return -EINVAL;
+ }
+ return 0;
+}
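+
+/* Illustration with hypothetical offsets: for an instruction like
+ *
+ *   r1 += 16   ; BPF_ALU64 | BPF_ADD | BPF_K, imm = 16
+ *
+ * insn->imm is validated against orig_off (16) and patched in place to
+ * new_off (say, 24) to match the running kernel's actual field offset.
+ */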
+
+static struct btf *btf_load_raw(const char *path)
+{
+ struct btf *btf;
+ size_t read_cnt;
+ struct stat st;
+ void *data;
+ FILE *f;
+
+ if (stat(path, &st))
+ return ERR_PTR(-errno);
+
+ data = malloc(st.st_size);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ f = fopen(path, "rb");
+ if (!f) {
+ btf = ERR_PTR(-errno);
+ goto cleanup;
+ }
+
+ read_cnt = fread(data, 1, st.st_size, f);
+ fclose(f);
+ if (read_cnt < st.st_size) {
+ btf = ERR_PTR(-EBADF);
+ goto cleanup;
+ }
+
+ btf = btf__new(data, read_cnt);
+
+cleanup:
+ free(data);
+ return btf;
+}
+
+/*
+ * Probe a few well-known locations for the vmlinux kernel image and try to
+ * load BTF data out of it, to be used as the target BTF.
+ */
+static struct btf *bpf_core_find_kernel_btf(void)
+{
+ struct {
+ const char *path_fmt;
+ bool raw_btf;
+ } locations[] = {
+ /* try canonical vmlinux BTF through sysfs first */
+ { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
+ /* fall back to trying to find vmlinux ELF on disk otherwise */
+ { "/boot/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/vmlinux-%1$s" },
+ { "/lib/modules/%1$s/build/vmlinux" },
+ { "/usr/lib/modules/%1$s/kernel/vmlinux" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s" },
+ { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
+ { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
+ };
+ char path[PATH_MAX + 1];
+ struct utsname buf;
+ struct btf *btf;
+ int i;
+
+ uname(&buf);
+
+ for (i = 0; i < ARRAY_SIZE(locations); i++) {
+ snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
+
+ if (access(path, R_OK))
+ continue;
+
+ if (locations[i].raw_btf)
+ btf = btf_load_raw(path);
+ else
+ btf = btf__parse_elf(path, NULL);
+
+ pr_debug("loading kernel BTF '%s': %ld\n",
+ path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
+ if (IS_ERR(btf))
+ continue;
+
+ return btf;
+ }
+
+ pr_warning("failed to find valid kernel BTF\n");
+ return ERR_PTR(-ESRCH);
+}
+
+/* Output spec definition in the format:
+ * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
+ * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
+ */
+static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
+{
+ const struct btf_type *t;
+ const char *s;
+ __u32 type_id;
+ int i;
+
+ type_id = spec->spec[0].type_id;
+ t = btf__type_by_id(spec->btf, type_id);
+ s = btf__name_by_offset(spec->btf, t->name_off);
+ libbpf_print(level, "[%u] %s + ", type_id, s);
+
+ for (i = 0; i < spec->raw_len; i++)
+ libbpf_print(level, "%d%s", spec->raw_spec[i],
+ i == spec->raw_len - 1 ? " => " : ":");
+
+ libbpf_print(level, "%u @ &x", spec->offset);
+
+ for (i = 0; i < spec->len; i++) {
+ if (spec->spec[i].name)
+ libbpf_print(level, ".%s", spec->spec[i].name);
+ else
+ libbpf_print(level, "[%u]", spec->spec[i].idx);
+ }
+}
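+
+/* Sample (hypothetical) output for the spec from the parsing example above:
+ *
+ *   [7] s + 0:1:0:2 => 12 @ &x[0].b[2]
+ *
+ * where [7] is an arbitrary BTF type ID for struct s.
+ */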
+
+static size_t bpf_core_hash_fn(const void *key, void *ctx)
+{
+ return (size_t)key;
+}
+
+static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+static void *u32_as_hash_key(__u32 x)
+{
+ return (void *)(uintptr_t)x;
+}
+
+/*
+ * CO-RE relocate a single instruction.
+ *
+ * The outline and important points of the algorithm:
+ * 1. For a given local type, find corresponding candidate target types.
+ *    A candidate type is a type with the same "essential" name, ignoring
+ *    everything after the last triple underscore (___). E.g., `sample`,
+ *    `sample___flavor_one`, and `sample___flavor_another_one` are all
+ *    candidates for each other. Names with a triple underscore are referred
+ *    to as "flavors" and are useful, among other things, for supporting
+ *    incompatible variations of the same kernel struct, which might differ
+ *    between different kernel versions and/or build configurations.
+ *
+ *    N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
+ *    converter, when deduplicated BTF of a kernel still contains more than
+ *    one different type with the same name. In that case, ___2, ___3, etc.
+ *    are appended starting from the second name conflict. But struct flavors
+ *    are also useful to define "locally", in a BPF program, to extract the
+ *    same data across incompatible changes between different kernel
+ *    versions/configurations. For instance, to handle field renames between
+ *    kernel versions, one can use two flavors of the struct name with the
+ *    same common name and use conditional relocations to extract that field,
+ *    depending on the target kernel version.
+ * 2. For each candidate type, try to match the local specification to this
+ *    candidate target type. Matching involves finding corresponding
+ *    high-level spec accessors, meaning that all named fields should match
+ *    and all array accesses should be within the actual bounds. Also,
+ *    types should be compatible (see bpf_core_fields_are_compat for details).
+ * 3. It is supported and expected that there might be multiple flavors
+ *    matching the spec. As long as all the specs resolve to the same set of
+ *    offsets across all candidates, there is no error. If there is any
+ *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
+ *    imperfections of BTF deduplication, which can cause slight duplication
+ *    of the same BTF type, if some directly or indirectly referenced (by
+ *    pointer) type gets resolved to different actual types in different
+ *    object files. If such a situation occurs, deduplicated BTF will end up
+ *    with two (or more) structurally identical types, which differ only in
+ *    the types they refer to through a pointer. This should be OK in most
+ *    cases and is not an error.
+ * 4. Candidate types search is performed by linearly scanning through all
+ *    types in target BTF. It is anticipated that this is overall more
+ *    efficient memory-wise and not significantly worse (if not better)
+ *    CPU-wise compared to prebuilding a map from all local type names to
+ *    a list of candidate type names. It's also sped up by caching the
+ *    resolved list of matching candidates for each local "root" type ID that
+ *    has at least one bpf_offset_reloc associated with it. This list is
+ *    shared between multiple relocations for the same type ID and is updated
+ *    as some of the candidates are pruned due to structural incompatibility.
+ */
+static int bpf_core_reloc_offset(struct bpf_program *prog,
+ const struct bpf_offset_reloc *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ const struct btf *targ_btf,
+ struct hashmap *cand_cache)
+{
+ const char *prog_name = bpf_program__title(prog, false);
+ struct bpf_core_spec local_spec, cand_spec, targ_spec;
+ const void *type_key = u32_as_hash_key(relo->type_id);
+ const struct btf_type *local_type, *cand_type;
+ const char *local_name, *cand_name;
+ struct ids_vec *cand_ids;
+ __u32 local_id, cand_id;
+ const char *spec_str;
+ int i, j, err;
+
+ local_id = relo->type_id;
+ local_type = btf__type_by_id(local_btf, local_id);
+ if (!local_type)
+ return -EINVAL;
+
+ local_name = btf__name_by_offset(local_btf, local_type->name_off);
+ if (str_is_empty(local_name))
+ return -EINVAL;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
+ if (str_is_empty(spec_str))
+ return -EINVAL;
+
+ err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
+ prog_name, relo_idx, local_id, local_name, spec_str,
+ err);
+ return -EINVAL;
+ }
+
+ pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
+ libbpf_print(LIBBPF_DEBUG, "\n");
+
+ if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
+ cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
+ if (IS_ERR(cand_ids)) {
+ pr_warning("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
+ prog_name, relo_idx, local_id, local_name,
+ PTR_ERR(cand_ids));
+ return PTR_ERR(cand_ids);
+ }
+ err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
+ if (err) {
+ bpf_core_free_cands(cand_ids);
+ return err;
+ }
+ }
+
+ for (i = 0, j = 0; i < cand_ids->len; i++) {
+ cand_id = cand_ids->data[i];
+ cand_type = btf__type_by_id(targ_btf, cand_id);
+ cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
+
+ err = bpf_core_spec_match(&local_spec, targ_btf,
+ cand_id, &cand_spec);
+ pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
+ prog_name, relo_idx, i, cand_name);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
+ libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
+ if (err < 0) {
+ pr_warning("prog '%s': relo #%d: matching error: %d\n",
+ prog_name, relo_idx, err);
+ return err;
+ }
+ if (err == 0)
+ continue;
+
+ if (j == 0) {
+ targ_spec = cand_spec;
+ } else if (cand_spec.offset != targ_spec.offset) {
+ /* if there are many candidates, they should all
+ * resolve to the same offset
+ */
+ pr_warning("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
+ prog_name, relo_idx, cand_spec.offset,
+ targ_spec.offset);
+ return -EINVAL;
+ }
+
+ cand_ids->data[j++] = cand_spec.spec[0].type_id;
+ }
+
+ cand_ids->len = j;
+ if (cand_ids->len == 0) {
+ pr_warning("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
+ prog_name, relo_idx, local_id, local_name, spec_str);
+ return -ESRCH;
+ }
+
+ err = bpf_core_reloc_insn(prog, relo->insn_off,
+ local_spec.offset, targ_spec.offset);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
+ prog_name, relo_idx, relo->insn_off, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+bpf_core_reloc_offsets(struct bpf_object *obj, const char *targ_btf_path)
+{
+ const struct btf_ext_info_sec *sec;
+ const struct bpf_offset_reloc *rec;
+ const struct btf_ext_info *seg;
+ struct hashmap_entry *entry;
+ struct hashmap *cand_cache = NULL;
+ struct bpf_program *prog;
+ struct btf *targ_btf;
+ const char *sec_name;
+ int i, err = 0;
+
+ if (targ_btf_path)
+ targ_btf = btf__parse_elf(targ_btf_path, NULL);
+ else
+ targ_btf = bpf_core_find_kernel_btf();
+ if (IS_ERR(targ_btf)) {
+ pr_warning("failed to get target BTF: %ld\n",
+ PTR_ERR(targ_btf));
+ return PTR_ERR(targ_btf);
+ }
+
+ cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
+ if (IS_ERR(cand_cache)) {
+ err = PTR_ERR(cand_cache);
+ goto out;
+ }
+
+ seg = &obj->btf_ext->offset_reloc_info;
+ for_each_btf_ext_sec(seg, sec) {
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name)) {
+ err = -EINVAL;
+ goto out;
+ }
+ prog = bpf_object__find_program_by_title(obj, sec_name);
+ if (!prog) {
+ pr_warning("failed to find program '%s' for CO-RE offset relocation\n",
+ sec_name);
+ err = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
+ sec_name, sec->num_info);
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = bpf_core_reloc_offset(prog, rec, i, obj->btf,
+ targ_btf, cand_cache);
+ if (err) {
+ pr_warning("prog '%s': relo #%d: failed to relocate: %d\n",
+ sec_name, i, err);
+ goto out;
+ }
+ }
+ }
+
+out:
+ btf__free(targ_btf);
+ if (!IS_ERR_OR_NULL(cand_cache)) {
+ hashmap__for_each_entry(cand_cache, entry, i) {
+ bpf_core_free_cands(entry->value);
+ }
+ hashmap__free(cand_cache);
+ }
+ return err;
+}
+
+static int
+bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
+{
+ int err = 0;
+
+ if (obj->btf_ext->offset_reloc_info.len)
+ err = bpf_core_reloc_offsets(obj, targ_btf_path);
+
+ return err;
+}
+
static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct reloc_desc *relo)
@@ -2395,14 +3291,21 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
return 0;
}
-
static int
-bpf_object__relocate(struct bpf_object *obj)
+bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
size_t i;
int err;
+ if (obj->btf_ext) {
+ err = bpf_object__relocate_core(obj, targ_btf_path);
+ if (err) {
+ pr_warning("failed to perform CO-RE relocations: %d\n",
+ err);
+ return err;
+ }
+ }
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
@@ -2808,7 +3711,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
obj->loaded = true;
CHECK_ERR(bpf_object__create_maps(obj), err, out);
- CHECK_ERR(bpf_object__relocate(obj), err, out);
+ CHECK_ERR(bpf_object__relocate(obj, attr->target_btf_path), err, out);
CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
return 0;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5cbf459ece0b..e8f70977d137 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -57,7 +57,7 @@ enum libbpf_print_level {
typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
const char *, va_list ap);
-LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
+LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
@@ -92,6 +92,7 @@ LIBBPF_API void bpf_object__close(struct bpf_object *object);
struct bpf_object_load_attr {
struct bpf_object *obj;
int log_level;
+ const char *target_btf_path;
};
/* Load/unload object into/from kernel */
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index f9d316e873d8..d04c7cb623ed 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -183,4 +183,10 @@ LIBBPF_0.0.4 {
perf_buffer__new;
perf_buffer__new_raw;
perf_buffer__poll;
+ xsk_umem__create;
} LIBBPF_0.0.3;
+
+LIBBPF_0.0.5 {
+ global:
+ bpf_btf_get_next_id;
+} LIBBPF_0.0.4;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 2ac29bd36226..2e83a34f8c79 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -29,6 +29,10 @@
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
+#ifndef offsetofend
+# define offsetofend(TYPE, FIELD) \
+ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
+#endif
extern void libbpf_print(enum libbpf_print_level level,
const char *format, ...)
@@ -46,4 +50,105 @@ do { \
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
+struct btf_ext_info {
+ /*
+ * info points to the individual info section (e.g. func_info and
+ * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
+ */
+ void *info;
+ __u32 rec_size;
+ __u32 len;
+};
+
+#define for_each_btf_ext_sec(seg, sec) \
+ for (sec = (seg)->info; \
+ (void *)sec < (seg)->info + (seg)->len; \
+ sec = (void *)sec + sizeof(struct btf_ext_info_sec) + \
+ (seg)->rec_size * sec->num_info)
+
+#define for_each_btf_ext_rec(seg, sec, i, rec) \
+ for (i = 0, rec = (void *)&(sec)->data; \
+ i < (sec)->num_info; \
+ i++, rec = (void *)rec + (seg)->rec_size)
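+
+/* Illustrative usage (mirroring how libbpf.c walks CO-RE offset relocations;
+ * `handle_rec' here is a hypothetical callback):
+ *
+ *   const struct btf_ext_info *seg = &btf_ext->offset_reloc_info;
+ *   const struct btf_ext_info_sec *sec;
+ *   const struct bpf_offset_reloc *rec;
+ *   int i;
+ *
+ *   for_each_btf_ext_sec(seg, sec)
+ *           for_each_btf_ext_rec(seg, sec, i, rec)
+ *                   handle_rec(rec);
+ */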
+
+struct btf_ext {
+ union {
+ struct btf_ext_header *hdr;
+ void *data;
+ };
+ struct btf_ext_info func_info;
+ struct btf_ext_info line_info;
+ struct btf_ext_info offset_reloc_info;
+ __u32 data_size;
+};
+
+struct btf_ext_info_sec {
+ __u32 sec_name_off;
+ __u32 num_info;
+ /* Followed by num_info * record_size number of bytes */
+ __u8 data[0];
+};
+
+/* The minimum bpf_func_info checked by the loader */
+struct bpf_func_info_min {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+/* The minimum bpf_line_info checked by the loader */
+struct bpf_line_info_min {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
+/* The minimum bpf_offset_reloc checked by the loader
+ *
+ * Offset relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ *   its insn->imm field to be relocated with the actual offset;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ *   offset;
+ * - access_str_off - offset into the corresponding .BTF string section. The
+ *   string itself encodes an accessed field using a sequence of field and
+ *   array indices, separated by colons (:). It's conceptually very close to
+ *   LLVM's getelementptr ([0]) instruction's arguments for identifying the
+ *   offset to a field.
+ *
+ * An example, to provide a better feel:
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int *x = &s->a; // encoded as "0:0" (a is field #0)
+ * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int *z = &s[10].b[0]; // encoded as "10:1:0:0" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such a relocation is emitted when using the __builtin_preserve_access_index()
+ * Clang built-in, passing an expression that captures a field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case, Clang will emit an offset relocation recording the data
+ * necessary to find the offset of the embedded `a.b.c` field within the
+ * `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_offset_reloc {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+};
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index ace1a0708d99..4b0b0364f5fc 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -244,6 +244,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 680e63066cf3..842c4fd55859 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -74,23 +74,6 @@ struct xsk_nl_info {
int fd;
};
-/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
- * Unfortunately, it is not part of glibc.
- */
-static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
- int fd, __u64 offset)
-{
-#ifdef __NR_mmap2
- unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
- long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
- (off_t)(offset >> page_shift));
-
- return (void *)ret;
-#else
- return mmap(addr, length, prot, flags, fd, offset);
-#endif
-}
-
int xsk_umem__fd(const struct xsk_umem *umem)
{
return umem ? umem->fd : -EINVAL;
@@ -116,6 +99,7 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
+ cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
return;
}
@@ -123,6 +107,7 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
cfg->comp_size = usr_cfg->comp_size;
cfg->frame_size = usr_cfg->frame_size;
cfg->frame_headroom = usr_cfg->frame_headroom;
+ cfg->flags = usr_cfg->flags;
}
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
@@ -149,9 +134,10 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
return 0;
}
-int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
- struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
+int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
+ __u64 size, struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
{
struct xdp_mmap_offsets off;
struct xdp_umem_reg mr;
@@ -182,6 +168,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
mr.len = size;
mr.chunk_size = umem->config.frame_size;
mr.headroom = umem->config.frame_headroom;
+ mr.flags = umem->config.flags;
err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
if (err) {
@@ -210,10 +197,9 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
goto out_socket;
}
- map = xsk_mmap(NULL, off.fr.desc +
- umem->config.fill_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- umem->fd, XDP_UMEM_PGOFF_FILL_RING);
+ map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
+ XDP_UMEM_PGOFF_FILL_RING);
if (map == MAP_FAILED) {
err = -errno;
goto out_socket;
@@ -224,13 +210,13 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
fill->size = umem->config.fill_size;
fill->producer = map + off.fr.producer;
fill->consumer = map + off.fr.consumer;
+ fill->flags = map + off.fr.flags;
fill->ring = map + off.fr.desc;
fill->cached_cons = umem->config.fill_size;
- map = xsk_mmap(NULL,
- off.cr.desc + umem->config.comp_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
+ map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
+ XDP_UMEM_PGOFF_COMPLETION_RING);
if (map == MAP_FAILED) {
err = -errno;
goto out_mmap;
@@ -241,6 +227,7 @@ int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
comp->size = umem->config.comp_size;
comp->producer = map + off.cr.producer;
comp->consumer = map + off.cr.consumer;
+ comp->flags = map + off.cr.flags;
comp->ring = map + off.cr.desc;
*umem_ptr = umem;
@@ -255,6 +242,29 @@ out_umem_alloc:
return err;
}
+struct xsk_umem_config_v1 {
+ __u32 fill_size;
+ __u32 comp_size;
+ __u32 frame_size;
+ __u32 frame_headroom;
+};
+
+int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
+ __u64 size, struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *usr_config)
+{
+ struct xsk_umem_config config;
+
+ /* a NULL config is valid and means "use defaults" */
+ if (!usr_config)
+ return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size,
+ fill, comp, NULL);
+
+ memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
+ config.flags = 0;
+
+ return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
+ &config);
+}
+asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
+asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
static const int log_buf_size = 16 * 1024;
@@ -550,11 +560,10 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
}
if (rx) {
- rx_map = xsk_mmap(NULL, off.rx.desc +
- xsk->config.rx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_RX_RING);
+ rx_map = mmap(NULL, off.rx.desc +
+ xsk->config.rx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_RX_RING);
if (rx_map == MAP_FAILED) {
err = -errno;
goto out_socket;
@@ -564,16 +573,16 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
rx->size = xsk->config.rx_size;
rx->producer = rx_map + off.rx.producer;
rx->consumer = rx_map + off.rx.consumer;
+ rx->flags = rx_map + off.rx.flags;
rx->ring = rx_map + off.rx.desc;
}
xsk->rx = rx;
if (tx) {
- tx_map = xsk_mmap(NULL, off.tx.desc +
- xsk->config.tx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_TX_RING);
+ tx_map = mmap(NULL, off.tx.desc +
+ xsk->config.tx_size * sizeof(struct xdp_desc),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
+ xsk->fd, XDP_PGOFF_TX_RING);
if (tx_map == MAP_FAILED) {
err = -errno;
goto out_mmap_rx;
@@ -583,6 +592,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
tx->size = xsk->config.tx_size;
tx->producer = tx_map + off.tx.producer;
tx->consumer = tx_map + off.tx.consumer;
+ tx->flags = tx_map + off.tx.flags;
tx->ring = tx_map + off.tx.desc;
tx->cached_cons = xsk->config.tx_size;
}
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 833a6e60d065..584f6820a639 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -32,6 +32,7 @@ struct name { \
__u32 *producer; \
__u32 *consumer; \
void *ring; \
+ __u32 *flags; \
}
DEFINE_XSK_RING(xsk_ring_prod);
@@ -76,6 +77,11 @@ xsk_ring_cons__rx_desc(const struct xsk_ring_cons *rx, __u32 idx)
return &descs[idx & rx->mask];
}
+static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r)
+{
+ return *r->flags & XDP_RING_NEED_WAKEUP;
+}
+
static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
{
__u32 free_entries = r->cached_cons - r->cached_prod;
@@ -162,6 +168,21 @@ static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
return &((char *)umem_area)[addr];
}
+static inline __u64 xsk_umem__extract_addr(__u64 addr)
+{
+ return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
+}
+
+static inline __u64 xsk_umem__extract_offset(__u64 addr)
+{
+ return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+}
+
+static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr)
+{
+ return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr);
+}
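+
+/* Illustration, assuming the XSK_UNALIGNED_* encoding from <linux/if_xdp.h>
+ * (offset in the upper bits, base address in the lower bits):
+ *
+ *   __u64 addr = (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | base;
+ *   xsk_umem__extract_addr(addr)       == base
+ *   xsk_umem__extract_offset(addr)     == offset
+ *   xsk_umem__add_offset_to_addr(addr) == base + offset
+ */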
+
LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem);
LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
@@ -170,12 +191,14 @@ LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk);
#define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */
#define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT)
#define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0
+#define XSK_UMEM__DEFAULT_FLAGS 0
struct xsk_umem_config {
__u32 fill_size;
__u32 comp_size;
__u32 frame_size;
__u32 frame_headroom;
+ __u32 flags;
};
/* Flags for the libbpf_flags field. */
@@ -195,6 +218,16 @@ LIBBPF_API int xsk_umem__create(struct xsk_umem **umem,
struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *config);
+LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
+LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
+ void *umem_area, __u64 size,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_umem_config *config);
LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
const char *ifname, __u32 queue_id,
struct xsk_umem *umem,