Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r-- tools/lib/bpf/libbpf.c | 416
1 file changed, 337 insertions(+), 79 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 6c2979f1b471..efab29b8935b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -185,6 +185,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
+ [BPF_MAP_TYPE_ARENA] = "arena",
};
static const char * const prog_type_name[] = {
@@ -497,6 +498,7 @@ struct bpf_struct_ops {
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
+#define ARENA_SEC ".arena.1"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -612,6 +614,7 @@ enum sec_type {
SEC_BSS,
SEC_DATA,
SEC_RODATA,
+ SEC_ST_OPS,
};
struct elf_sec_desc {
@@ -627,8 +630,7 @@ struct elf_state {
Elf *elf;
Elf64_Ehdr *ehdr;
Elf_Data *symbols;
- Elf_Data *st_ops_data;
- Elf_Data *st_ops_link_data;
+ Elf_Data *arena_data;
size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct elf_sec_desc *secs;
@@ -637,8 +639,8 @@ struct elf_state {
__u32 btf_maps_sec_btf_id;
int text_shndx;
int symbols_shndx;
- int st_ops_shndx;
- int st_ops_link_shndx;
+ bool has_st_ops;
+ int arena_data_shndx;
};
struct usdt_manager;
@@ -698,6 +700,10 @@ struct bpf_object {
struct usdt_manager *usdt_man;
+ struct bpf_map *arena_map;
+ void *arena_data;
+ size_t arena_data_sz;
+
struct kern_feature_cache *feat_cache;
char *token_path;
int token_fd;
@@ -948,7 +954,7 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind);
static int
-find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
struct module_btf **mod_btf,
const struct btf_type **type, __u32 *type_id,
const struct btf_type **vtype, __u32 *vtype_id,
@@ -958,8 +964,12 @@ find_struct_ops_kern_types(struct bpf_object *obj, const char *tname,
const struct btf_member *kern_data_member;
struct btf *btf;
__s32 kern_vtype_id, kern_type_id;
+ char tname[256];
__u32 i;
+ snprintf(tname, sizeof(tname), "%.*s",
+ (int)bpf_core_essential_name_len(tname_raw), tname_raw);
+
kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
&btf, mod_btf);
if (kern_type_id < 0) {
@@ -1027,6 +1037,48 @@ static bool is_valid_st_ops_program(struct bpf_object *obj,
return false;
}
+/* For each struct_ops program P, referenced from some struct_ops map M,
+ * enable P.autoload if at least one such M has M.autocreate set,
+ * disable P.autoload if every such M has M.autocreate unset.
+ * Don't change P.autoload for programs that are not referenced from any maps.
+ */
+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
+{
+ struct bpf_program *prog, *slot_prog;
+ struct bpf_map *map;
+ int i, j, k, vlen;
+
+ for (i = 0; i < obj->nr_programs; ++i) {
+ bool should_load = false;
+ int use_cnt = 0;
+
+ prog = &obj->programs[i];
+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
+ continue;
+
+ for (j = 0; j < obj->nr_maps; ++j) {
+ map = &obj->maps[j];
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ vlen = btf_vlen(map->st_ops->type);
+ for (k = 0; k < vlen; ++k) {
+ slot_prog = map->st_ops->progs[k];
+ if (prog != slot_prog)
+ continue;
+
+ use_cnt++;
+ if (map->autocreate)
+ should_load = true;
+ }
+ }
+ if (use_cnt)
+ prog->autoload = should_load;
+ }
+
+ return 0;
+}
+
/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
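For context, a minimal user-space sketch of how this interacts with the public libbpf API (the skeleton and map names are hypothetical):

    /* Maps declared in a "?"-prefixed section start with autocreate == false;
     * enabling one before load also re-enables autoload for the programs it
     * references, via bpf_object_adjust_struct_ops_autoload() above.
     */
    struct my_skel *skel = my_skel__open();              /* hypothetical skeleton */

    bpf_map__set_autocreate(skel->maps.optional_ops, true);
    my_skel__load(skel);                                 /* autoload adjusted during load */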
@@ -1142,8 +1194,32 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
if (mod_btf)
prog->attach_btf_obj_fd = mod_btf->fd;
- prog->attach_btf_id = kern_type_id;
- prog->expected_attach_type = kern_member_idx;
+
+ /* if we haven't yet processed this BPF program, record proper
+ * attach_btf_id and member_idx
+ */
+ if (!prog->attach_btf_id) {
+ prog->attach_btf_id = kern_type_id;
+ prog->expected_attach_type = kern_member_idx;
+ }
+
+ /* struct_ops BPF prog can be re-used between multiple
+ * .struct_ops & .struct_ops.link as long as it's the
+ * same struct_ops struct definition and the same
+ * function pointer field
+ */
+ if (prog->attach_btf_id != kern_type_id) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->attach_btf_id, kern_type_id);
+ return -EINVAL;
+ }
+ if (prog->expected_attach_type != kern_member_idx) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->expected_attach_type, kern_member_idx);
+ return -EINVAL;
+ }
st_ops->kern_func_off[i] = kern_data_off + kern_moff;
@@ -1184,6 +1260,9 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
if (!bpf_map__is_struct_ops(map))
continue;
+ if (!map->autocreate)
+ continue;
+
err = bpf_map__init_kern_struct_ops(map);
if (err)
return err;
@@ -1193,7 +1272,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
}
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
- int shndx, Elf_Data *data, __u32 map_flags)
+ int shndx, Elf_Data *data)
{
const struct btf_type *type, *datasec;
const struct btf_var_secinfo *vsi;
@@ -1251,11 +1330,20 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
return -ENOMEM;
map->btf_value_type_id = type_id;
+ /* Follow same convention as for programs autoload:
+ * SEC("?.struct_ops") means map is not created by default.
+ */
+ if (sec_name[0] == '?') {
+ map->autocreate = false;
+ /* from now on, ignore the '?' prefix in the section name */
+ sec_name++;
+ }
+
map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
map->def.key_size = sizeof(int);
map->def.value_size = type->size;
map->def.max_entries = 1;
- map->def.map_flags = map_flags;
+ map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
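On the BPF side, the new '?' prefix mirrors the SEC("?...") autoload convention for programs. A sketch, using a placeholder struct_ops type and member:

    /* Declared in a "?"-prefixed section, so the map starts with
     * autocreate == false and is skipped unless explicitly enabled.
     */
    SEC("?.struct_ops.link")
    struct bpf_dummy_ops optional_ops = {
        .test_1 = (void *)test_1_prog,   /* placeholder prog */
    };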
@@ -1290,15 +1378,25 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
- int err;
+ const char *sec_name;
+ int sec_idx, err;
- err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
- obj->efile.st_ops_data, 0);
- err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
- obj->efile.st_ops_link_shndx,
- obj->efile.st_ops_link_data,
- BPF_F_LINK);
- return err;
+ for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
+ struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
+
+ if (desc->sec_type != SEC_ST_OPS)
+ continue;
+
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ if (!sec_name)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static struct bpf_object *bpf_object__new(const char *path,
@@ -1336,8 +1434,6 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
- obj->efile.st_ops_shndx = -1;
- obj->efile.st_ops_link_shndx = -1;
obj->kconfig_map_idx = -1;
obj->kern_version = get_kernel_version();
@@ -1354,8 +1450,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
elf_end(obj->efile.elf);
obj->efile.elf = NULL;
obj->efile.symbols = NULL;
- obj->efile.st_ops_data = NULL;
- obj->efile.st_ops_link_data = NULL;
+ obj->efile.arena_data = NULL;
zfree(&obj->efile.secs);
obj->efile.sec_cnt = 0;
@@ -1598,7 +1693,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return map;
}
-static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
+static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
@@ -1608,6 +1703,20 @@ static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
return map_sz;
}
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+
+ switch (map->def.type) {
+ case BPF_MAP_TYPE_ARRAY:
+ return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ case BPF_MAP_TYPE_ARENA:
+ return page_sz * map->def.max_entries;
+ default:
+ return 0; /* not supported */
+ }
+}
+
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
void *mmaped;
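Note the different sizing rules: for arrays the mmap size is derived from value_size and max_entries, while for arenas max_entries counts pages. A BPF-side sketch of an arena declaration sized accordingly:

    /* max_entries is the arena size in pages, so bpf_map_mmap_sz()
     * returns page_sz * 100 for this map.
     */
    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 100);
    } arena SEC(".maps");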
@@ -1750,7 +1859,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->value_size = data_sz;
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
- ? BPF_F_RDONLY_PROG : 0;
+ ? BPF_F_RDONLY_PROG : 0;
/* failures are fine because of maps like .rodata.str1.1 */
(void) map_fill_btf_type_info(obj, map);
@@ -1761,7 +1870,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ mmap_sz = bpf_map_mmap_sz(map);
map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
@@ -2249,6 +2358,46 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
+static bool get_map_field_long(const char *map_name, const struct btf *btf,
+ const struct btf_member *m, __u64 *res)
+{
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
+ const char *name = btf__name_by_offset(btf, m->name_off);
+
+ if (btf_is_ptr(t)) {
+ __u32 res32;
+ bool ret;
+
+ ret = get_map_field_int(map_name, btf, m, &res32);
+ if (ret)
+ *res = (__u64)res32;
+ return ret;
+ }
+
+ if (!btf_is_enum(t) && !btf_is_enum64(t)) {
+ pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
+ map_name, name, btf_kind_str(t));
+ return false;
+ }
+
+ if (btf_vlen(t) != 1) {
+ pr_warn("map '%s': attr '%s': invalid __ulong\n",
+ map_name, name);
+ return false;
+ }
+
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e = btf_enum(t);
+
+ *res = e->val;
+ } else {
+ const struct btf_enum64 *e = btf_enum64(t);
+
+ *res = btf_enum64_value(e);
+ }
+ return true;
+}
+
static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
{
int len;
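The enum/enum64 branch exists because __uint() cannot carry a 64-bit constant: the __ulong() macro from bpf_helpers.h encodes the value as a single-entry enum, which this helper reads back. A declaration-side sketch (the address is illustrative):

    /* __ulong() stores the 64-bit constant as a one-value enum in BTF;
     * get_map_field_long() recovers it via btf_enum64_value().
     */
    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 10);
        __ulong(map_extra, 1ull << 44);   /* illustrative fixed mmap address */
    } arena SEC(".maps");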
@@ -2482,9 +2631,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
map_def->pinning = val;
map_def->parts |= MAP_DEF_PINNING;
} else if (strcmp(name, "map_extra") == 0) {
- __u32 map_extra;
+ __u64 map_extra;
- if (!get_map_field_int(map_name, btf, m, &map_extra))
+ if (!get_map_field_long(map_name, btf, m, &map_extra))
return -EINVAL;
map_def->map_extra = map_extra;
map_def->parts |= MAP_DEF_MAP_EXTRA;
@@ -2702,6 +2851,32 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return 0;
}
+static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
+ const char *sec_name, int sec_idx,
+ void *data, size_t data_sz)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t mmap_sz;
+
+ mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ if (roundup(data_sz, page_sz) > mmap_sz) {
+ pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
+ sec_name, mmap_sz, data_sz);
+ return -E2BIG;
+ }
+
+ obj->arena_data = malloc(data_sz);
+ if (!obj->arena_data)
+ return -ENOMEM;
+ memcpy(obj->arena_data, data, data_sz);
+ obj->arena_data_sz = data_sz;
+
+ /* make bpf_map__init_value() work for ARENA maps */
+ map->mmaped = obj->arena_data;
+
+ return 0;
+}
+
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
const char *pin_root_path)
{
@@ -2751,6 +2926,33 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
+ for (i = 0; i < obj->nr_maps; i++) {
+ struct bpf_map *map = &obj->maps[i];
+
+ if (map->def.type != BPF_MAP_TYPE_ARENA)
+ continue;
+
+ if (obj->arena_map) {
+ pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
+ map->name, obj->arena_map->name);
+ return -EINVAL;
+ }
+ obj->arena_map = map;
+
+ if (obj->efile.arena_data) {
+ err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
+ obj->efile.arena_data->d_buf,
+ obj->efile.arena_data->d_size);
+ if (err)
+ return err;
+ }
+ }
+ if (obj->efile.arena_data && !obj->arena_map) {
+ pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
+ ARENA_SEC);
+ return -ENOENT;
+ }
+
return 0;
}
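For the BPF side of this contract, a sketch of global __arena variables, assuming an __arena macro along the lines of the selftests' __attribute__((address_space(1))), which the compiler emits into ARENA_SEC:

    /* These land in the ".arena.1" datasec and require an arena map
     * declared in SEC(".maps"); otherwise the checks above fail the
     * load with -ENOENT.
     */
    int __arena counter;
    char __arena scratch[64];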
@@ -2783,6 +2985,11 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return sh->sh_flags & SHF_EXECINSTR;
}
+static bool starts_with_qmark(const char *s)
+{
+ return s && s[0] == '?';
+}
+
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
@@ -2792,9 +2999,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
return !has_func || !has_datasec || !has_func_global || !has_float ||
- !has_decl_tag || !has_type_tag || !has_enum64;
+ !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
}
static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2806,6 +3014,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
int enum64_placeholder_id = 0;
struct btf_type *t;
int i, j, vlen;
@@ -2832,7 +3041,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
name = (char *)btf__name_by_offset(btf, t->name_off);
while (*name) {
- if (*name == '.')
+ if (*name == '.' || *name == '?')
*name = '_';
name++;
}
@@ -2847,6 +3056,14 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
vt = (void *)btf__type_by_id(btf, v->type);
m->name_off = vt->name_off;
}
+ } else if (!has_qmark_datasec && btf_is_datasec(t) &&
+ starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
+ /* replace '?' prefix with '_' for DATASEC names */
+ char *name;
+
+ name = (char *)btf__name_by_offset(btf, t->name_off);
+ if (name[0] == '?')
+ name[0] = '_';
} else if (!has_func && btf_is_func_proto(t)) {
/* replace FUNC_PROTO with ENUM */
vlen = btf_vlen(t);
@@ -2900,14 +3117,13 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->efile.st_ops_link_shndx >= 0 ||
+ obj->efile.has_st_ops ||
obj->nr_extern > 0;
}
static bool kernel_needs_btf(const struct bpf_object *obj)
{
- return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
+ return obj->efile.has_st_ops;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -3608,12 +3824,17 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
sec_desc->sec_type = SEC_RODATA;
sec_desc->shdr = sh;
sec_desc->data = data;
- } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
- obj->efile.st_ops_data = data;
- obj->efile.st_ops_shndx = idx;
- } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
- obj->efile.st_ops_link_data = data;
- obj->efile.st_ops_link_shndx = idx;
+ } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
+ sec_desc->sec_type = SEC_ST_OPS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ obj->efile.has_st_ops = true;
+ } else if (strcmp(name, ARENA_SEC) == 0) {
+ obj->efile.arena_data = data;
+ obj->efile.arena_data_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -3629,6 +3850,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
idx, name, targ_sec_idx,
@@ -4241,6 +4464,15 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ /* arena data relocation */
+ if (shdr_idx == obj->efile.arena_data_shndx) {
+ reloc_desc->type = RELO_DATA;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
/* generic map reference relocation */
if (type == LIBBPF_MAP_UNSPEC) {
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
@@ -4781,6 +5013,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
return 0;
}
+
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -4873,6 +5106,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_ARENA:
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
@@ -5117,7 +5351,23 @@ retry:
if (err < 0)
goto err_out;
}
-
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
+ PROT_READ | PROT_WRITE,
+ map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
+ map->fd, 0);
+ if (map->mmaped == MAP_FAILED) {
+ err = -errno;
+ map->mmaped = NULL;
+ pr_warn("map '%s': failed to mmap arena: %d\n",
+ map->name, err);
+ return err;
+ }
+ if (obj->arena_data) {
+ memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
+ zfree(&obj->arena_data);
+ }
+ }
if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
err = init_map_in_map_slots(obj, map);
if (err < 0)
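Here map_extra doubles as the requested user VM address: when non-zero, libbpf mmap()s the arena MAP_FIXED at that address; when zero, the kernel picks one. A user-space sketch (address illustrative):

    /* Must be called before bpf_object__load(), since the arena is
     * mmap()ed right after map creation.
     */
    bpf_map__set_map_extra(map, 1ull << 44);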
@@ -6926,12 +7176,12 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
data = sec_desc->data;
idx = shdr->sh_info;
- if (shdr->sh_type != SHT_REL) {
+ if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
+ if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -8105,11 +8355,20 @@ static void bpf_map_prepare_vdata(const struct bpf_map *map)
static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
{
+ struct bpf_map *map;
int i;
- for (i = 0; i < obj->nr_maps; i++)
- if (bpf_map__is_struct_ops(&obj->maps[i]))
- bpf_map_prepare_vdata(&obj->maps[i]);
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
+
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ if (!map->autocreate)
+ continue;
+
+ bpf_map_prepare_vdata(map);
+ }
return 0;
}
@@ -8135,6 +8394,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+ err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
@@ -8604,13 +8864,9 @@ static void bpf_map__destroy(struct bpf_map *map)
zfree(&map->init_slots);
map->init_slots_sz = 0;
- if (map->mmaped) {
- size_t mmap_sz;
-
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- munmap(map->mmaped, mmap_sz);
- map->mmaped = NULL;
- }
+ if (map->mmaped && map->mmaped != map->obj->arena_data)
+ munmap(map->mmaped, bpf_map_mmap_sz(map));
+ map->mmaped = NULL;
if (map->st_ops) {
zfree(&map->st_ops->data);
@@ -8670,6 +8926,8 @@ void bpf_object__close(struct bpf_object *obj)
if (obj->token_fd > 0)
close(obj->token_fd);
+ zfree(&obj->arena_data);
+
free(obj);
}
@@ -9424,27 +9682,6 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -EINVAL;
}
- /* if we haven't yet processed this BPF program, record proper
- * attach_btf_id and member_idx
- */
- if (!prog->attach_btf_id) {
- prog->attach_btf_id = st_ops->type_id;
- prog->expected_attach_type = member_idx;
- }
-
- /* struct_ops BPF prog can be re-used between multiple
- * .struct_ops & .struct_ops.link as long as it's the
- * same struct_ops struct definition and the same
- * function pointer field
- */
- if (prog->attach_btf_id != st_ops->type_id ||
- prog->expected_attach_type != member_idx) {
- pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->sec_name, prog->type,
- prog->attach_btf_id, prog->expected_attach_type, name);
- return -EINVAL;
- }
-
st_ops->progs[member_idx] = prog;
/* st_ops->data will be exposed to users, being returned by
@@ -9862,11 +10099,14 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
return libbpf_err(-EBUSY);
if (map->mmaped) {
- int err;
size_t mmap_old_sz, mmap_new_sz;
+ int err;
+
+ if (map->def.type != BPF_MAP_TYPE_ARRAY)
+ return -EOPNOTSUPP;
- mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
+ mmap_old_sz = bpf_map_mmap_sz(map);
+ mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
if (err) {
pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
@@ -9899,18 +10139,26 @@ __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size)
{
+ size_t actual_sz;
+
if (map->obj->loaded || map->reused)
return libbpf_err(-EBUSY);
- if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
- size != map->def.value_size)
+ if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
+ return libbpf_err(-EINVAL);
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ actual_sz = map->obj->arena_data_sz;
+ else
+ actual_sz = map->def.value_size;
+ if (size != actual_sz)
return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
return 0;
}
-void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
{
if (bpf_map__is_struct_ops(map)) {
if (psize)
@@ -9920,7 +10168,12 @@ void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
if (!map->mmaped)
return NULL;
- *psize = map->def.value_size;
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ *psize = map->obj->arena_data_sz;
+ else
+ *psize = map->def.value_size;
+
return map->mmaped;
}
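A user-space sketch of the updated getter semantics for arenas:

    /* For BPF_MAP_TYPE_ARENA, *psize now reports the size of the
     * __arena globals (arena_data_sz) rather than def.value_size.
     */
    size_t sz;
    void *data = bpf_map__initial_value(map, &sz);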
@@ -13397,7 +13650,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = s->maps[i].mmaped;
@@ -13409,6 +13662,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
continue;
}
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ *mmaped = map->mmaped;
+ continue;
+ }
+
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
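Taken together, a hypothetical skeleton-based sketch of what the arena plumbing enables (the skeleton type and its arena member name are assumptions):

    /* The skeleton's arena pointer aliases map->mmaped as set up in
     * bpf_object__create_maps(), so __arena globals become memory
     * shared between user space and BPF programs.
     */
    struct my_skel *skel = my_skel__open_and_load();  /* hypothetical */

    skel->arena->counter++;   /* immediately visible to the BPF side */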