Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/arraymap.c         |   35
-rw-r--r--  kernel/bpf/bpf_cgrp_storage.c |    6
-rw-r--r--  kernel/bpf/bpf_lsm.c          |   12
-rw-r--r--  kernel/bpf/bpf_struct_ops.c   |   35
-rw-r--r--  kernel/bpf/btf.c              |  300
-rw-r--r--  kernel/bpf/core.c             |   50
-rw-r--r--  kernel/bpf/cpumask.c          |   20
-rw-r--r--  kernel/bpf/dispatcher.c       |    7
-rw-r--r--  kernel/bpf/hashtab.c          |   12
-rw-r--r--  kernel/bpf/helpers.c          |   78
-rw-r--r--  kernel/bpf/inode.c            |   53
-rw-r--r--  kernel/bpf/log.c              |  504
-rw-r--r--  kernel/bpf/lpm_trie.c         |    3
-rw-r--r--  kernel/bpf/map_in_map.c       |   17
-rw-r--r--  kernel/bpf/map_in_map.h       |    2
-rw-r--r--  kernel/bpf/memalloc.c         |  198
-rw-r--r--  kernel/bpf/stackmap.c         |   11
-rw-r--r--  kernel/bpf/syscall.c          |  114
-rw-r--r--  kernel/bpf/task_iter.c        |   29
-rw-r--r--  kernel/bpf/tnum.c             |   13
-rw-r--r--  kernel/bpf/trampoline.c       |  101
-rw-r--r--  kernel/bpf/verifier.c         | 2586
-rw-r--r--  kernel/trace/bpf_trace.c      |  180
23 files changed, 2574 insertions, 1792 deletions
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index c85ff9162a5c..0bdbbbeab155 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -867,11 +867,11 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
}
if (old_ptr)
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, true);
return 0;
}
-static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
+static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
void *old_ptr;
@@ -890,13 +890,18 @@ static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
}
if (old_ptr) {
- map->ops->map_fd_put_ptr(old_ptr);
+ map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
return 0;
} else {
return -ENOENT;
}
}
+static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+ return __fd_array_map_delete_elem(map, key, true);
+}
+
static void *prog_fd_array_get_ptr(struct bpf_map *map,
struct file *map_file, int fd)
{
@@ -913,8 +918,9 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map,
return prog;
}
-static void prog_fd_array_put_ptr(void *ptr)
+static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_prog is freed after one RCU or tasks trace grace period */
bpf_prog_put(ptr);
}
@@ -924,13 +930,13 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
}
/* decrement refcnt of all bpf_progs that are stored in this map */
-static void bpf_fd_array_map_clear(struct bpf_map *map)
+static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
for (i = 0; i < array->map.max_entries; i++)
- fd_array_map_delete_elem(map, &i);
+ __fd_array_map_delete_elem(map, &i, need_defer);
}
static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
@@ -1071,7 +1077,7 @@ static void prog_array_map_clear_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_array_aux,
work)->map;
- bpf_fd_array_map_clear(map);
+ bpf_fd_array_map_clear(map, true);
bpf_map_put(map);
}
@@ -1151,7 +1157,7 @@ static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
{
struct bpf_event_entry *ee;
- ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
+ ee = kzalloc(sizeof(*ee), GFP_KERNEL);
if (ee) {
ee->event = perf_file->private_data;
ee->perf_file = perf_file;
@@ -1201,8 +1207,9 @@ err_out:
return ee;
}
-static void perf_event_fd_array_put_ptr(void *ptr)
+static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
+ /* bpf_perf_event is freed after one RCU grace period */
bpf_event_entry_free_rcu(ptr);
}
@@ -1220,7 +1227,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
for (i = 0; i < array->map.max_entries; i++) {
ee = READ_ONCE(array->ptrs[i]);
if (ee && ee->map_file == map_file)
- fd_array_map_delete_elem(map, &i);
+ __fd_array_map_delete_elem(map, &i, true);
}
rcu_read_unlock();
}
@@ -1228,7 +1235,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
static void perf_event_fd_array_map_free(struct bpf_map *map)
{
if (map->map_flags & BPF_F_PRESERVE_ELEMS)
- bpf_fd_array_map_clear(map);
+ bpf_fd_array_map_clear(map, false);
fd_array_map_free(map);
}
@@ -1256,7 +1263,7 @@ static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
return cgroup_get_from_fd(fd);
}
-static void cgroup_fd_array_put_ptr(void *ptr)
+static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
/* cgroup_put free cgrp after a rcu grace period */
cgroup_put(ptr);
@@ -1264,7 +1271,7 @@ static void cgroup_fd_array_put_ptr(void *ptr)
static void cgroup_fd_array_free(struct bpf_map *map)
{
- bpf_fd_array_map_clear(map);
+ bpf_fd_array_map_clear(map, false);
fd_array_map_free(map);
}
@@ -1309,7 +1316,7 @@ static void array_of_map_free(struct bpf_map *map)
* is protected by fdget/fdput.
*/
bpf_map_meta_free(map->inner_map_meta);
- bpf_fd_array_map_clear(map);
+ bpf_fd_array_map_clear(map, false);
fd_array_map_free(map);
}
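
A minimal sketch of the new .map_fd_put_ptr prototype from the map-ops side, assuming a hypothetical fd map whose objects are plain kmalloc'ed structs (example_obj and example_fd_put_ptr are illustrative names, not part of this patch); the callback only defers the free when need_defer says the element may still be visible to running programs:

#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct rcu_head rcu;
};

/* new three-argument prototype:
 * void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer)
 */
static void example_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	struct example_obj *obj = ptr;

	if (need_defer)
		kfree_rcu(obj, rcu);	/* element may still be referenced */
	else
		kfree(obj);		/* nothing can see the element anymore */
}
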
diff --git a/kernel/bpf/bpf_cgrp_storage.c b/kernel/bpf/bpf_cgrp_storage.c
index d44fe8dd9732..28efd0a3f220 100644
--- a/kernel/bpf/bpf_cgrp_storage.c
+++ b/kernel/bpf/bpf_cgrp_storage.c
@@ -82,7 +82,7 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
int fd;
fd = *(int *)key;
- cgroup = cgroup_get_from_fd(fd);
+ cgroup = cgroup_v1v2_get_from_fd(fd);
if (IS_ERR(cgroup))
return ERR_CAST(cgroup);
@@ -101,7 +101,7 @@ static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
int fd;
fd = *(int *)key;
- cgroup = cgroup_get_from_fd(fd);
+ cgroup = cgroup_v1v2_get_from_fd(fd);
if (IS_ERR(cgroup))
return PTR_ERR(cgroup);
@@ -131,7 +131,7 @@ static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
int err, fd;
fd = *(int *)key;
- cgroup = cgroup_get_from_fd(fd);
+ cgroup = cgroup_v1v2_get_from_fd(fd);
if (IS_ERR(cgroup))
return PTR_ERR(cgroup);
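
Since the lookup/update/delete paths now resolve the key through cgroup_v1v2_get_from_fd(), a cgrp_storage map can be queried from user space with an fd of either a cgroup v1 or v2 directory. A hedged sketch; the cgroup path, value layout and dump_storage() wrapper are assumptions for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int dump_storage(int map_fd)
{
	__u64 value;
	int cg_fd;

	/* an fd of a cgroup v1 directory now works as a key, too */
	cg_fd = open("/sys/fs/cgroup/net_cls/mygroup", O_RDONLY | O_DIRECTORY);
	if (cg_fd < 0) {
		perror("open cgroup");
		return -1;
	}
	if (bpf_map_lookup_elem(map_fd, &cg_fd, &value) == 0)
		printf("storage value: %llu\n", (unsigned long long)value);
	close(cg_fd);
	return 0;
}
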
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index e14c822f8911..e8e910395bf6 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -298,6 +298,18 @@ BTF_ID(func, bpf_lsm_kernel_module_request)
BTF_ID(func, bpf_lsm_kernel_read_file)
BTF_ID(func, bpf_lsm_kernfs_init_security)
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, bpf_lsm_path_unlink)
+BTF_ID(func, bpf_lsm_path_mkdir)
+BTF_ID(func, bpf_lsm_path_rmdir)
+BTF_ID(func, bpf_lsm_path_truncate)
+BTF_ID(func, bpf_lsm_path_symlink)
+BTF_ID(func, bpf_lsm_path_link)
+BTF_ID(func, bpf_lsm_path_rename)
+BTF_ID(func, bpf_lsm_path_chmod)
+BTF_ID(func, bpf_lsm_path_chown)
+#endif /* CONFIG_SECURITY_PATH */
+
#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_free)
#endif /* CONFIG_KEYS */
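
With the path hooks added to this sleepable set, sleepable LSM programs ("lsm.s/") can attach to them. A hedged sketch; the argument list mirrors the security_path_mkdir() hook prototype and is an assumption for illustration:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("lsm.s/path_mkdir")
int BPF_PROG(audit_mkdir, const struct path *dir, struct dentry *dentry,
	     umode_t mode)
{
	/* sleepable context: helpers like bpf_copy_from_user() are usable here */
	return 0;	/* 0 allows the operation, a negative errno denies it */
}

char LICENSE[] SEC("license") = "GPL";
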
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index db6176fb64dc..02068bd0e4d9 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -352,18 +352,24 @@ const struct bpf_link_ops bpf_struct_ops_link_lops = {
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
struct bpf_tramp_link *link,
const struct btf_func_model *model,
- void *image, void *image_end)
+ void *stub_func, void *image, void *image_end)
{
- u32 flags;
+ u32 flags = BPF_TRAMP_F_INDIRECT;
+ int size;
tlinks[BPF_TRAMP_FENTRY].links[0] = link;
tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
- /* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
- * and it must be used alone.
- */
- flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
+
+ if (model->ret_size > 0)
+ flags |= BPF_TRAMP_F_RET_FENTRY_RET;
+
+ size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
+ if (size < 0)
+ return size;
+ if (size > (unsigned long)image_end - (unsigned long)image)
+ return -E2BIG;
return arch_prepare_bpf_trampoline(NULL, image, image_end,
- model, flags, tlinks, NULL);
+ model, flags, tlinks, stub_func);
}
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
@@ -497,11 +503,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
err = bpf_struct_ops_prepare_trampoline(tlinks, link,
&st_ops->func_models[i],
+ *(void **)(st_ops->cfi_stubs + moff),
image, image_end);
if (err < 0)
goto reset_unlock;
- *(void **)(kdata + moff) = image;
+ *(void **)(kdata + moff) = image + cfi_get_offset();
image += err;
/* put prog_id to udata */
@@ -515,7 +522,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
if (err)
goto reset_unlock;
}
- set_memory_rox((long)st_map->image, 1);
+ arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
/* Let bpf_link handle registration & unregistration.
*
* Pair with smp_load_acquire() during lookup_elem().
@@ -524,7 +531,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
goto unlock;
}
- set_memory_rox((long)st_map->image, 1);
+ arch_protect_bpf_trampoline(st_map->image, PAGE_SIZE);
err = st_ops->reg(kdata);
if (likely(!err)) {
/* This refcnt increment on the map here after
@@ -547,8 +554,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
* there was a race in registering the struct_ops (under the same name) to
* a sub-system through different struct_ops's maps.
*/
- set_memory_nx((long)st_map->image, 1);
- set_memory_rw((long)st_map->image, 1);
+ arch_unprotect_bpf_trampoline(st_map->image, PAGE_SIZE);
reset_unlock:
bpf_struct_ops_map_put_progs(st_map);
@@ -616,7 +622,7 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map)
bpf_struct_ops_map_put_progs(st_map);
bpf_map_area_free(st_map->links);
if (st_map->image) {
- bpf_jit_free_exec(st_map->image);
+ arch_free_bpf_trampoline(st_map->image, PAGE_SIZE);
bpf_jit_uncharge_modmem(PAGE_SIZE);
}
bpf_map_area_free(st_map->uvalue);
@@ -691,7 +697,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
return ERR_PTR(ret);
}
- st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
+ st_map->image = arch_alloc_bpf_trampoline(PAGE_SIZE);
if (!st_map->image) {
/* __bpf_struct_ops_map_free() uses st_map->image as flag
* for "charged or not". In this case, we need to unchange
@@ -711,7 +717,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
}
mutex_init(&st_map->lock);
- set_vm_flush_reset_perms(st_map->image);
bpf_map_init_from_attr(map, attr);
return map;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 15d71d2986d3..51e8b4bee0c8 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3840,9 +3840,6 @@ end:
return ERR_PTR(ret);
}
-#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
-#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
-
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
{
int i;
@@ -3855,13 +3852,13 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* Hence we only need to ensure that bpf_{list_head,rb_root} ownership
* does not form cycles.
*/
- if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & GRAPH_ROOT_MASK))
+ if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & BPF_GRAPH_ROOT))
return 0;
for (i = 0; i < rec->cnt; i++) {
struct btf_struct_meta *meta;
u32 btf_id;
- if (!(rec->fields[i].type & GRAPH_ROOT_MASK))
+ if (!(rec->fields[i].type & BPF_GRAPH_ROOT))
continue;
btf_id = rec->fields[i].graph_root.value_btf_id;
meta = btf_find_struct_meta(btf, btf_id);
@@ -3873,7 +3870,7 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* to check ownership cycle for a type unless it's also a
* node type.
*/
- if (!(rec->field_mask & GRAPH_NODE_MASK))
+ if (!(rec->field_mask & BPF_GRAPH_NODE))
continue;
/* We need to ensure ownership acyclicity among all types. The
@@ -3909,7 +3906,7 @@ int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec)
* - A is both an root and node.
* - B is only an node.
*/
- if (meta->record->field_mask & GRAPH_ROOT_MASK)
+ if (meta->record->field_mask & BPF_GRAPH_ROOT)
return -ELOOP;
}
return 0;
@@ -6768,222 +6765,64 @@ int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *pr
return btf_check_func_type_match(log, btf1, t1, btf2, t2);
}
-static int btf_check_func_arg_match(struct bpf_verifier_env *env,
- const struct btf *btf, u32 func_id,
- struct bpf_reg_state *regs,
- bool ptr_to_mem_ok,
- bool processing_call)
+static bool btf_is_dynptr_ptr(const struct btf *btf, const struct btf_type *t)
{
- enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
- struct bpf_verifier_log *log = &env->log;
- const char *func_name, *ref_tname;
- const struct btf_type *t, *ref_t;
- const struct btf_param *args;
- u32 i, nargs, ref_id;
- int ret;
-
- t = btf_type_by_id(btf, func_id);
- if (!t || !btf_type_is_func(t)) {
- /* These checks were already done by the verifier while loading
- * struct bpf_func_info or in add_kfunc_call().
- */
- bpf_log(log, "BTF of func_id %u doesn't point to KIND_FUNC\n",
- func_id);
- return -EFAULT;
- }
- func_name = btf_name_by_offset(btf, t->name_off);
-
- t = btf_type_by_id(btf, t->type);
- if (!t || !btf_type_is_func_proto(t)) {
- bpf_log(log, "Invalid BTF of func %s\n", func_name);
- return -EFAULT;
- }
- args = (const struct btf_param *)(t + 1);
- nargs = btf_type_vlen(t);
- if (nargs > MAX_BPF_FUNC_REG_ARGS) {
- bpf_log(log, "Function %s has %d > %d args\n", func_name, nargs,
- MAX_BPF_FUNC_REG_ARGS);
- return -EINVAL;
- }
-
- /* check that BTF function arguments match actual types that the
- * verifier sees.
- */
- for (i = 0; i < nargs; i++) {
- enum bpf_arg_type arg_type = ARG_DONTCARE;
- u32 regno = i + 1;
- struct bpf_reg_state *reg = &regs[regno];
-
- t = btf_type_skip_modifiers(btf, args[i].type, NULL);
- if (btf_type_is_scalar(t)) {
- if (reg->type == SCALAR_VALUE)
- continue;
- bpf_log(log, "R%d is not a scalar\n", regno);
- return -EINVAL;
- }
-
- if (!btf_type_is_ptr(t)) {
- bpf_log(log, "Unrecognized arg#%d type %s\n",
- i, btf_type_str(t));
- return -EINVAL;
- }
-
- ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
- ref_tname = btf_name_by_offset(btf, ref_t->name_off);
-
- ret = check_func_arg_reg_off(env, reg, regno, arg_type);
- if (ret < 0)
- return ret;
+ const char *name;
- if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
- /* If function expects ctx type in BTF check that caller
- * is passing PTR_TO_CTX.
- */
- if (reg->type != PTR_TO_CTX) {
- bpf_log(log,
- "arg#%d expected pointer to ctx, but got %s\n",
- i, btf_type_str(t));
- return -EINVAL;
- }
- } else if (ptr_to_mem_ok && processing_call) {
- const struct btf_type *resolve_ret;
- u32 type_size;
+ t = btf_type_by_id(btf, t->type); /* skip PTR */
- resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
- if (IS_ERR(resolve_ret)) {
- bpf_log(log,
- "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
- i, btf_type_str(ref_t), ref_tname,
- PTR_ERR(resolve_ret));
- return -EINVAL;
- }
+ while (btf_type_is_modifier(t))
+ t = btf_type_by_id(btf, t->type);
- if (check_mem_reg(env, reg, regno, type_size))
- return -EINVAL;
- } else {
- bpf_log(log, "reg type unsupported for arg#%d function %s#%d\n", i,
- func_name, func_id);
- return -EINVAL;
- }
+ /* allow either struct or struct forward declaration */
+ if (btf_type_is_struct(t) ||
+ (btf_type_is_fwd(t) && btf_type_kflag(t) == 0)) {
+ name = btf_str_by_offset(btf, t->name_off);
+ return name && strcmp(name, "bpf_dynptr") == 0;
}
- return 0;
-}
-
-/* Compare BTF of a function declaration with given bpf_reg_state.
- * Returns:
- * EFAULT - there is a verifier bug. Abort verification.
- * EINVAL - there is a type mismatch or BTF is not available.
- * 0 - BTF matches with what bpf_reg_state expects.
- * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
- */
-int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs)
-{
- struct bpf_prog *prog = env->prog;
- struct btf *btf = prog->aux->btf;
- bool is_global;
- u32 btf_id;
- int err;
-
- if (!prog->aux->func_info)
- return -EINVAL;
-
- btf_id = prog->aux->func_info[subprog].type_id;
- if (!btf_id)
- return -EFAULT;
-
- if (prog->aux->func_info_aux[subprog].unreliable)
- return -EINVAL;
-
- is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
- err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, false);
-
- /* Compiler optimizations can remove arguments from static functions
- * or mismatched type can be passed into a global function.
- * In such cases mark the function as unreliable from BTF point of view.
- */
- if (err)
- prog->aux->func_info_aux[subprog].unreliable = true;
- return err;
-}
-
-/* Compare BTF of a function call with given bpf_reg_state.
- * Returns:
- * EFAULT - there is a verifier bug. Abort verification.
- * EINVAL - there is a type mismatch or BTF is not available.
- * 0 - BTF matches with what bpf_reg_state expects.
- * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
- *
- * NOTE: the code is duplicated from btf_check_subprog_arg_match()
- * because btf_check_func_arg_match() is still doing both. Once that
- * function is split in 2, we can call from here btf_check_subprog_arg_match()
- * first, and then treat the calling part in a new code path.
- */
-int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs)
-{
- struct bpf_prog *prog = env->prog;
- struct btf *btf = prog->aux->btf;
- bool is_global;
- u32 btf_id;
- int err;
-
- if (!prog->aux->func_info)
- return -EINVAL;
-
- btf_id = prog->aux->func_info[subprog].type_id;
- if (!btf_id)
- return -EFAULT;
-
- if (prog->aux->func_info_aux[subprog].unreliable)
- return -EINVAL;
-
- is_global = prog->aux->func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
- err = btf_check_func_arg_match(env, btf, btf_id, regs, is_global, true);
-
- /* Compiler optimizations can remove arguments from static functions
- * or mismatched type can be passed into a global function.
- * In such cases mark the function as unreliable from BTF point of view.
- */
- if (err)
- prog->aux->func_info_aux[subprog].unreliable = true;
- return err;
+ return false;
}
-/* Convert BTF of a function into bpf_reg_state if possible
+/* Process BTF of a function to produce high-level expectation of function
+ * arguments (like ARG_PTR_TO_CTX, or ARG_PTR_TO_MEM, etc). This information
+ * is cached in subprog info for reuse.
* Returns:
* EFAULT - there is a verifier bug. Abort verification.
* EINVAL - cannot convert BTF.
- * 0 - Successfully converted BTF into bpf_reg_state
- * (either PTR_TO_CTX or SCALAR_VALUE).
+ * 0 - Successfully processed BTF and constructed argument expectations.
*/
-int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *regs, bool is_ex_cb)
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog)
{
+ bool is_global = subprog_aux(env, subprog)->linkage == BTF_FUNC_GLOBAL;
+ struct bpf_subprog_info *sub = subprog_info(env, subprog);
struct bpf_verifier_log *log = &env->log;
struct bpf_prog *prog = env->prog;
enum bpf_prog_type prog_type = prog->type;
struct btf *btf = prog->aux->btf;
const struct btf_param *args;
- const struct btf_type *t, *ref_t;
+ const struct btf_type *t, *ref_t, *fn_t;
u32 i, nargs, btf_id;
const char *tname;
- if (!prog->aux->func_info ||
- prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
+ if (sub->args_cached)
+ return 0;
+
+ if (!prog->aux->func_info) {
bpf_log(log, "Verifier bug\n");
return -EFAULT;
}
btf_id = prog->aux->func_info[subprog].type_id;
if (!btf_id) {
+ if (!is_global) /* not fatal for static funcs */
+ return -EINVAL;
bpf_log(log, "Global functions need valid BTF\n");
return -EFAULT;
}
- t = btf_type_by_id(btf, btf_id);
- if (!t || !btf_type_is_func(t)) {
+ fn_t = btf_type_by_id(btf, btf_id);
+ if (!fn_t || !btf_type_is_func(fn_t)) {
/* These checks were already done by the verifier while loading
* struct bpf_func_info
*/
@@ -6991,11 +6830,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
subprog);
return -EFAULT;
}
- tname = btf_name_by_offset(btf, t->name_off);
-
- if (log->level & BPF_LOG_LEVEL)
- bpf_log(log, "Validating %s() func#%d...\n",
- tname, subprog);
+ tname = btf_name_by_offset(btf, fn_t->name_off);
if (prog->aux->func_info_aux[subprog].unreliable) {
bpf_log(log, "Verifier bug in function %s()\n", tname);
@@ -7004,7 +6839,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
if (prog_type == BPF_PROG_TYPE_EXT)
prog_type = prog->aux->dst_prog->type;
- t = btf_type_by_id(btf, t->type);
+ t = btf_type_by_id(btf, fn_t->type);
if (!t || !btf_type_is_func_proto(t)) {
bpf_log(log, "Invalid type of function %s()\n", tname);
return -EFAULT;
@@ -7030,24 +6865,54 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
* Only PTR_TO_CTX and SCALAR are supported atm.
*/
for (i = 0; i < nargs; i++) {
- struct bpf_reg_state *reg = &regs[i + 1];
+ bool is_nonnull = false;
+ const char *tag;
t = btf_type_by_id(btf, args[i].type);
+
+ tag = btf_find_decl_tag_value(btf, fn_t, i, "arg:");
+ if (IS_ERR(tag) && PTR_ERR(tag) == -ENOENT) {
+ tag = NULL;
+ } else if (IS_ERR(tag)) {
+ bpf_log(log, "arg#%d type's tag fetching failure: %ld\n", i, PTR_ERR(tag));
+ return PTR_ERR(tag);
+ }
+ /* 'arg:<tag>' decl_tag takes precedence over derivation of
+ * register type from BTF type itself
+ */
+ if (tag) {
+ /* disallow arg tags in static subprogs */
+ if (!is_global) {
+ bpf_log(log, "arg#%d type tag is not supported in static functions\n", i);
+ return -EOPNOTSUPP;
+ }
+ if (strcmp(tag, "ctx") == 0) {
+ sub->args[i].arg_type = ARG_PTR_TO_CTX;
+ continue;
+ }
+ if (strcmp(tag, "nonnull") == 0)
+ is_nonnull = true;
+ }
+
while (btf_type_is_modifier(t))
t = btf_type_by_id(btf, t->type);
if (btf_type_is_int(t) || btf_is_any_enum(t)) {
- reg->type = SCALAR_VALUE;
+ sub->args[i].arg_type = ARG_ANYTHING;
continue;
}
- if (btf_type_is_ptr(t)) {
- if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
- reg->type = PTR_TO_CTX;
- continue;
- }
+ if (btf_type_is_ptr(t) && btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
+ sub->args[i].arg_type = ARG_PTR_TO_CTX;
+ continue;
+ }
+ if (btf_type_is_ptr(t) && btf_is_dynptr_ptr(btf, t)) {
+ sub->args[i].arg_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY;
+ continue;
+ }
+ if (is_global && btf_type_is_ptr(t)) {
+ u32 mem_size;
t = btf_type_skip_modifiers(btf, t->type, NULL);
-
- ref_t = btf_resolve_size(btf, t, &reg->mem_size);
+ ref_t = btf_resolve_size(btf, t, &mem_size);
if (IS_ERR(ref_t)) {
bpf_log(log,
"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
@@ -7056,23 +6921,22 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
return -EINVAL;
}
- reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
- reg->id = ++env->id_gen;
-
+ sub->args[i].arg_type = is_nonnull ? ARG_PTR_TO_MEM : ARG_PTR_TO_MEM_OR_NULL;
+ sub->args[i].mem_size = mem_size;
continue;
}
+ if (is_nonnull) {
+ bpf_log(log, "arg#%d marked as non-null, but is not a pointer type\n", i);
+ return -EINVAL;
+ }
bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
i, btf_type_str(t), tname);
return -EINVAL;
}
- /* We have already ensured that the callback returns an integer, just
- * like all global subprogs. We need to determine it only has a single
- * scalar argument.
- */
- if (is_ex_cb && (nargs != 1 || regs[BPF_REG_1].type != SCALAR_VALUE)) {
- bpf_log(log, "exception cb only supports single integer argument\n");
- return -EINVAL;
- }
+
+ sub->arg_cnt = nargs;
+ sub->args_cached = true;
+
return 0;
}
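
A hedged sketch of the "arg:" decl tags that btf_prepare_func_args() now honours for global subprogs; recent libbpf ships __arg_ctx/__arg_nonnull shorthands with these definitions, and the tracepoint below is just an example:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#ifndef __arg_ctx
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#endif
#ifndef __arg_nonnull
#define __arg_nonnull __attribute__((btf_decl_tag("arg:nonnull")))
#endif

int hits;

/* global (non-static) subprog: the tags make the verifier treat ctx as
 * ARG_PTR_TO_CTX and counter as a non-NULL ARG_PTR_TO_MEM
 */
__noinline int bump(void *ctx __arg_ctx, int *counter __arg_nonnull)
{
	*counter += 1;
	return 0;
}

SEC("tp/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
	return bump(ctx, &hits);
}

char LICENSE[] SEC("license") = "GPL";
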
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fe254ae035fe..ea6843be2616 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -121,6 +121,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
#endif
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
+#ifdef CONFIG_FINEIBT
+ INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
+#endif
mutex_init(&fp->aux->used_maps_mutex);
mutex_init(&fp->aux->dst_mutex);
@@ -687,6 +690,23 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp)
fp->aux->ksym.prog = true;
bpf_ksym_add(&fp->aux->ksym);
+
+#ifdef CONFIG_FINEIBT
+ /*
+ * When FineIBT, code in the __cfi_foo() symbols can get executed
+ * and hence unwinder needs help.
+ */
+ if (cfi_mode != CFI_FINEIBT)
+ return;
+
+ snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
+ "__cfi_%s", fp->aux->ksym.name);
+
+ fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
+ fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func;
+
+ bpf_ksym_add(&fp->aux->ksym_prefix);
+#endif
}
void bpf_prog_kallsyms_del(struct bpf_prog *fp)
@@ -695,6 +715,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp)
return;
bpf_ksym_del(&fp->aux->ksym);
+#ifdef CONFIG_FINEIBT
+ if (cfi_mode != CFI_FINEIBT)
+ return;
+ bpf_ksym_del(&fp->aux->ksym_prefix);
+#endif
}
static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
@@ -932,20 +957,20 @@ out:
return ptr;
}
-void bpf_prog_pack_free(struct bpf_binary_header *hdr)
+void bpf_prog_pack_free(void *ptr, u32 size)
{
struct bpf_prog_pack *pack = NULL, *tmp;
unsigned int nbits;
unsigned long pos;
mutex_lock(&pack_mutex);
- if (hdr->size > BPF_PROG_PACK_SIZE) {
- bpf_jit_free_exec(hdr);
+ if (size > BPF_PROG_PACK_SIZE) {
+ bpf_jit_free_exec(ptr);
goto out;
}
list_for_each_entry(tmp, &pack_list, list) {
- if ((void *)hdr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > (void *)hdr) {
+ if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
pack = tmp;
break;
}
@@ -954,10 +979,10 @@ void bpf_prog_pack_free(struct bpf_binary_header *hdr)
if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
goto out;
- nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
- pos = ((unsigned long)hdr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
+ nbits = BPF_PROG_SIZE_TO_NBITS(size);
+ pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;
- WARN_ONCE(bpf_arch_text_invalidate(hdr, hdr->size),
+ WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
"bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");
bitmap_clear(pack->bitmap, pos, nbits);
@@ -1104,8 +1129,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
*rw_header = kvmalloc(size, GFP_KERNEL);
if (!*rw_header) {
- bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
- bpf_prog_pack_free(ro_header);
+ bpf_prog_pack_free(ro_header, size);
bpf_jit_uncharge_modmem(size);
return NULL;
}
@@ -1136,7 +1160,7 @@ int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
kvfree(rw_header);
if (IS_ERR(ptr)) {
- bpf_prog_pack_free(ro_header);
+ bpf_prog_pack_free(ro_header, ro_header->size);
return PTR_ERR(ptr);
}
return 0;
@@ -1157,7 +1181,7 @@ void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
{
u32 size = ro_header->size;
- bpf_prog_pack_free(ro_header);
+ bpf_prog_pack_free(ro_header, size);
kvfree(rw_header);
bpf_jit_uncharge_modmem(size);
}
@@ -2668,12 +2692,16 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
struct bpf_map **used_maps, u32 len)
{
struct bpf_map *map;
+ bool sleepable;
u32 i;
+ sleepable = aux->sleepable;
for (i = 0; i < len; i++) {
map = used_maps[i];
if (map->ops->map_poke_untrack)
map->ops->map_poke_untrack(map, aux);
+ if (sleepable)
+ atomic64_dec(&map->sleepable_refcnt);
bpf_map_put(map);
}
}
diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index e01c741e54e7..2e73533a3811 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -96,6 +96,12 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
migrate_enable();
}
+__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask)
+{
+ bpf_cpumask_release(cpumask);
+}
+CFI_NOSEAL(bpf_cpumask_release_dtor);
+
/**
* bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask.
* @cpumask: The cpumask being queried.
@@ -405,6 +411,17 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
return cpumask_any_and_distribute(src1, src2);
}
+/**
+ * bpf_cpumask_weight() - Return the number of bits in @cpumask.
+ * @cpumask: The cpumask being queried.
+ *
+ * Count the number of set bits in the given cpumask.
+ */
+__bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
+{
+ return cpumask_weight(cpumask);
+}
+
__bpf_kfunc_end_defs();
BTF_SET8_START(cpumask_kfunc_btf_ids)
@@ -432,6 +449,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
+BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
BTF_SET8_END(cpumask_kfunc_btf_ids)
static const struct btf_kfunc_id_set cpumask_kfunc_set = {
@@ -441,7 +459,7 @@ static const struct btf_kfunc_id_set cpumask_kfunc_set = {
BTF_ID_LIST(cpumask_dtor_ids)
BTF_ID(struct, bpf_cpumask)
-BTF_ID(func, bpf_cpumask_release)
+BTF_ID(func, bpf_cpumask_release_dtor)
static int __init cpumask_kfunc_init(void)
{
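
A hedged sketch using the new bpf_cpumask_weight() kfunc to count how many CPUs a freshly created task may run on; the tracepoint choice is illustrative, and task->cpus_ptr is usable here because it is a trusted nested pointer:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(count_allowed_cpus, struct task_struct *task, u64 clone_flags)
{
	u32 nr_allowed = bpf_cpumask_weight(task->cpus_ptr);

	bpf_printk("pid %d may run on %u CPUs", task->pid, nr_allowed);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
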
diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
index fa3e9225aedc..70fb82bf1637 100644
--- a/kernel/bpf/dispatcher.c
+++ b/kernel/bpf/dispatcher.c
@@ -150,14 +150,11 @@ void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
goto out;
d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
if (!d->rw_image) {
- u32 size = PAGE_SIZE;
-
- bpf_arch_text_copy(d->image, &size, sizeof(size));
- bpf_prog_pack_free((struct bpf_binary_header *)d->image);
+ bpf_prog_pack_free(d->image, PAGE_SIZE);
d->image = NULL;
goto out;
}
- bpf_image_ksym_add(d->image, &d->ksym);
+ bpf_image_ksym_add(d->image, PAGE_SIZE, &d->ksym);
}
prev_num_progs = d->num_progs;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ba98ed2b3f9..03a6a2500b6a 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -898,7 +898,7 @@ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
if (map->ops->map_fd_put_ptr) {
ptr = fd_htab_map_get_ptr(map, l);
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, true);
}
}
@@ -2485,7 +2485,7 @@ static void fd_htab_map_free(struct bpf_map *map)
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
void *ptr = fd_htab_map_get_ptr(map, l);
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);
}
}
@@ -2524,9 +2524,15 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ /* The htab bucket lock is always held during update operations in fd
+ * htab map, and the following rcu_read_lock() is only used to avoid
+ * the WARN_ON_ONCE in htab_map_update_elem().
+ */
+ rcu_read_lock();
ret = htab_map_update_elem(map, key, &ptr, map_flags);
+ rcu_read_unlock();
if (ret)
- map->ops->map_fd_put_ptr(ptr);
+ map->ops->map_fd_put_ptr(map, ptr, false);
return ret;
}
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 56b0c1f678ee..be72824f32b2 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -32,12 +32,13 @@
*
* Different map implementations will rely on rcu in map methods
* lookup/update/delete, therefore eBPF programs must run under rcu lock
- * if program is allowed to access maps, so check rcu_read_lock_held in
- * all three functions.
+ * if program is allowed to access maps, so check rcu_read_lock_held() or
+ * rcu_read_lock_trace_held() in all three functions.
*/
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
return (unsigned long) map->ops->map_lookup_elem(map, key);
}
@@ -53,7 +54,8 @@ const struct bpf_func_proto bpf_map_lookup_elem_proto = {
BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
void *, value, u64, flags)
{
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
return map->ops->map_update_elem(map, key, value, flags);
}
@@ -70,7 +72,8 @@ const struct bpf_func_proto bpf_map_update_elem_proto = {
BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
- WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
+ !rcu_read_lock_bh_held());
return map->ops->map_delete_elem(map, key);
}
@@ -1937,10 +1940,7 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
ma = &bpf_global_percpu_ma;
else
ma = &bpf_global_ma;
- if (rec && rec->refcount_off >= 0)
- bpf_mem_free_rcu(ma, p);
- else
- bpf_mem_free(ma, p);
+ bpf_mem_free_rcu(ma, p);
}
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
@@ -2150,6 +2150,12 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p)
put_task_struct_rcu_user(p);
}
+__bpf_kfunc void bpf_task_release_dtor(void *p)
+{
+ put_task_struct_rcu_user(p);
+}
+CFI_NOSEAL(bpf_task_release_dtor);
+
#ifdef CONFIG_CGROUPS
/**
* bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
@@ -2174,6 +2180,12 @@ __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
cgroup_put(cgrp);
}
+__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
+{
+ cgroup_put(cgrp);
+}
+CFI_NOSEAL(bpf_cgroup_release_dtor);
+
/**
* bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
* array. A cgroup returned by this kfunc which is not subsequently stored in a
@@ -2231,6 +2243,25 @@ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
rcu_read_unlock();
return ret;
}
+
+/**
+ * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
+ * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
+ * hierarchy ID.
+ * @task: The target task
+ * @hierarchy_id: The ID of a cgroup1 hierarchy
+ *
+ * On success, the cgroup is returned. On failure, NULL is returned.

+ */
+__bpf_kfunc struct cgroup *
+bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
+{
+ struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
+
+ if (IS_ERR(cgrp))
+ return NULL;
+ return cgrp;
+}
#endif /* CONFIG_CGROUPS */
/**
@@ -2506,7 +2537,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
* which skips compiler generated instrumentation to do the same.
*/
kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
- ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp);
+ ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
WARN(1, "A call to BPF exception callback should never return\n");
}
@@ -2520,7 +2551,7 @@ BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
-BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
BTF_ID_FLAGS(func, bpf_list_push_front_impl)
BTF_ID_FLAGS(func, bpf_list_push_back_impl)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
@@ -2537,6 +2568,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
+BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
#endif
BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_throw)
@@ -2550,10 +2582,10 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
-BTF_ID(func, bpf_task_release)
+BTF_ID(func, bpf_task_release_dtor)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
-BTF_ID(func, bpf_cgroup_release)
+BTF_ID(func, bpf_cgroup_release_dtor)
#endif
BTF_SET8_START(common_btf_ids)
@@ -2610,6 +2642,7 @@ static int __init kfunc_init(void)
ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
ARRAY_SIZE(generic_dtors),
@@ -2618,3 +2651,22 @@ static int __init kfunc_init(void)
}
late_initcall(kfunc_init);
+
+/* Get a pointer to dynptr data up to len bytes for read only access. If
+ * the dynptr doesn't have continuous data up to len bytes, return NULL.
+ */
+const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
+{
+ return bpf_dynptr_slice(ptr, 0, NULL, len);
+}
+
+/* Get a pointer to dynptr data up to len bytes for read write access. If
+ * the dynptr doesn't have continuous data up to len bytes, or the dynptr
+ * is read only, return NULL.
+ */
+void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
+{
+ if (__bpf_dynptr_is_rdonly(ptr))
+ return NULL;
+ return (void *)__bpf_dynptr_data(ptr, len);
+}
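
A hedged sketch of the new bpf_task_get_cgroup1() kfunc: acquire a task's cgroup within a given cgroup v1 hierarchy and release it afterwards. The hierarchy ID and the tracepoint are assumptions for illustration:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* assumed ID of a mounted cgroup v1 hierarchy, set from user space */
const volatile int target_hierarchy_id = 1;

extern struct cgroup *bpf_task_get_cgroup1(struct task_struct *task,
					   int hierarchy_id) __ksym;
extern void bpf_cgroup_release(struct cgroup *cgrp) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(report_cgroup1, struct task_struct *task, u64 clone_flags)
{
	struct cgroup *cgrp;

	cgrp = bpf_task_get_cgroup1(task, target_hierarchy_id);
	if (!cgrp)	/* KF_RET_NULL: the hierarchy may not be mounted */
		return 0;

	bpf_printk("pid %d is at cgroup1 level %d", task->pid, cgrp->level);
	bpf_cgroup_release(cgrp);	/* KF_ACQUIRE: must be released */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
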
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 1aafb2ff2e95..41e0a55c35f5 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -599,8 +599,15 @@ EXPORT_SYMBOL(bpf_prog_get_type_path);
*/
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
- umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;
-
+ struct inode *inode = d_inode(root);
+ umode_t mode = inode->i_mode & S_IALLUGO & ~S_ISVTX;
+
+ if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID))
+ seq_printf(m, ",uid=%u",
+ from_kuid_munged(&init_user_ns, inode->i_uid));
+ if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID))
+ seq_printf(m, ",gid=%u",
+ from_kgid_munged(&init_user_ns, inode->i_gid));
if (mode != S_IRWXUGO)
seq_printf(m, ",mode=%o", mode);
return 0;
@@ -625,15 +632,21 @@ static const struct super_operations bpf_super_ops = {
};
enum {
+ OPT_UID,
+ OPT_GID,
OPT_MODE,
};
static const struct fs_parameter_spec bpf_fs_parameters[] = {
+ fsparam_u32 ("uid", OPT_UID),
+ fsparam_u32 ("gid", OPT_GID),
fsparam_u32oct ("mode", OPT_MODE),
{}
};
struct bpf_mount_opts {
+ kuid_t uid;
+ kgid_t gid;
umode_t mode;
};
@@ -641,6 +654,8 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct bpf_mount_opts *opts = fc->fs_private;
struct fs_parse_result result;
+ kuid_t uid;
+ kgid_t gid;
int opt;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
@@ -662,12 +677,42 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
}
switch (opt) {
+ case OPT_UID:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ goto bad_value;
+
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fc->user_ns, uid))
+ goto bad_value;
+
+ opts->uid = uid;
+ break;
+ case OPT_GID:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ goto bad_value;
+
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fc->user_ns, gid))
+ goto bad_value;
+
+ opts->gid = gid;
+ break;
case OPT_MODE:
opts->mode = result.uint_32 & S_IALLUGO;
break;
}
return 0;
+bad_value:
+ return invalfc(fc, "Bad value for '%s'", param->key);
}
struct bpf_preload_ops *bpf_preload_ops;
@@ -750,6 +795,8 @@ static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_op = &bpf_super_ops;
inode = sb->s_root->d_inode;
+ inode->i_uid = opts->uid;
+ inode->i_gid = opts->gid;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
populate_bpffs(sb->s_root);
@@ -785,6 +832,8 @@ static int bpf_init_fs_context(struct fs_context *fc)
return -ENOMEM;
opts->mode = S_IRWXUGO;
+ opts->uid = current_fsuid();
+ opts->gid = current_fsgid();
fc->fs_private = opts;
fc->ops = &bpf_context_ops;
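
A hedged user-space sketch of the new uid=/gid= mount options; the mount point, uid/gid values and mode below are assumptions for illustration:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* delegate this bpffs instance to uid/gid 1000 */
	if (mount("bpffs", "/sys/fs/bpf", "bpf", 0,
		  "uid=1000,gid=1000,mode=0770") != 0) {
		perror("mount bpffs");
		return 1;
	}
	return 0;
}
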
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index 850494423530..594a234f122b 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -10,6 +10,8 @@
#include <linux/bpf_verifier.h>
#include <linux/math64.h>
+#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
+
static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
/* ubuf and len_total should both be specified (or not) together */
@@ -325,3 +327,505 @@ __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
+
+static const struct bpf_line_info *
+find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
+{
+ const struct bpf_line_info *linfo;
+ const struct bpf_prog *prog;
+ u32 i, nr_linfo;
+
+ prog = env->prog;
+ nr_linfo = prog->aux->nr_linfo;
+
+ if (!nr_linfo || insn_off >= prog->len)
+ return NULL;
+
+ linfo = prog->aux->linfo;
+ for (i = 1; i < nr_linfo; i++)
+ if (insn_off < linfo[i].insn_off)
+ break;
+
+ return &linfo[i - 1];
+}
+
+static const char *ltrim(const char *s)
+{
+ while (isspace(*s))
+ s++;
+
+ return s;
+}
+
+__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...)
+{
+ const struct bpf_line_info *linfo;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ linfo = find_linfo(env, insn_off);
+ if (!linfo || linfo == env->prev_linfo)
+ return;
+
+ if (prefix_fmt) {
+ va_list args;
+
+ va_start(args, prefix_fmt);
+ bpf_verifier_vlog(&env->log, prefix_fmt, args);
+ va_end(args);
+ }
+
+ verbose(env, "%s\n",
+ ltrim(btf_name_by_offset(env->prog->aux->btf,
+ linfo->line_off)));
+
+ env->prev_linfo = linfo;
+}
+
+static const char *btf_type_name(const struct btf *btf, u32 id)
+{
+ return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
+}
+
+/* string representation of 'enum bpf_reg_type'
+ *
+ * Note that reg_type_str() can not appear more than once in a single verbose()
+ * statement.
+ */
+const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
+{
+ char postfix[16] = {0}, prefix[64] = {0};
+ static const char * const str[] = {
+ [NOT_INIT] = "?",
+ [SCALAR_VALUE] = "scalar",
+ [PTR_TO_CTX] = "ctx",
+ [CONST_PTR_TO_MAP] = "map_ptr",
+ [PTR_TO_MAP_VALUE] = "map_value",
+ [PTR_TO_STACK] = "fp",
+ [PTR_TO_PACKET] = "pkt",
+ [PTR_TO_PACKET_META] = "pkt_meta",
+ [PTR_TO_PACKET_END] = "pkt_end",
+ [PTR_TO_FLOW_KEYS] = "flow_keys",
+ [PTR_TO_SOCKET] = "sock",
+ [PTR_TO_SOCK_COMMON] = "sock_common",
+ [PTR_TO_TCP_SOCK] = "tcp_sock",
+ [PTR_TO_TP_BUFFER] = "tp_buffer",
+ [PTR_TO_XDP_SOCK] = "xdp_sock",
+ [PTR_TO_BTF_ID] = "ptr_",
+ [PTR_TO_MEM] = "mem",
+ [PTR_TO_BUF] = "buf",
+ [PTR_TO_FUNC] = "func",
+ [PTR_TO_MAP_KEY] = "map_key",
+ [CONST_PTR_TO_DYNPTR] = "dynptr_ptr",
+ };
+
+ if (type & PTR_MAYBE_NULL) {
+ if (base_type(type) == PTR_TO_BTF_ID)
+ strncpy(postfix, "or_null_", 16);
+ else
+ strncpy(postfix, "_or_null", 16);
+ }
+
+ snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
+ type & MEM_RDONLY ? "rdonly_" : "",
+ type & MEM_RINGBUF ? "ringbuf_" : "",
+ type & MEM_USER ? "user_" : "",
+ type & MEM_PERCPU ? "percpu_" : "",
+ type & MEM_RCU ? "rcu_" : "",
+ type & PTR_UNTRUSTED ? "untrusted_" : "",
+ type & PTR_TRUSTED ? "trusted_" : ""
+ );
+
+ snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
+ prefix, str[base_type(type)], postfix);
+ return env->tmp_str_buf;
+}
+
+const char *dynptr_type_str(enum bpf_dynptr_type type)
+{
+ switch (type) {
+ case BPF_DYNPTR_TYPE_LOCAL:
+ return "local";
+ case BPF_DYNPTR_TYPE_RINGBUF:
+ return "ringbuf";
+ case BPF_DYNPTR_TYPE_SKB:
+ return "skb";
+ case BPF_DYNPTR_TYPE_XDP:
+ return "xdp";
+ case BPF_DYNPTR_TYPE_INVALID:
+ return "<invalid>";
+ default:
+ WARN_ONCE(1, "unknown dynptr type %d\n", type);
+ return "<unknown>";
+ }
+}
+
+const char *iter_type_str(const struct btf *btf, u32 btf_id)
+{
+ if (!btf || btf_id == 0)
+ return "<invalid>";
+
+ /* we already validated that type is valid and has conforming name */
+ return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
+}
+
+const char *iter_state_str(enum bpf_iter_state state)
+{
+ switch (state) {
+ case BPF_ITER_STATE_ACTIVE:
+ return "active";
+ case BPF_ITER_STATE_DRAINED:
+ return "drained";
+ case BPF_ITER_STATE_INVALID:
+ return "<invalid>";
+ default:
+ WARN_ONCE(1, "unknown iter state %d\n", state);
+ return "<unknown>";
+ }
+}
+
+static char slot_type_char[] = {
+ [STACK_INVALID] = '?',
+ [STACK_SPILL] = 'r',
+ [STACK_MISC] = 'm',
+ [STACK_ZERO] = '0',
+ [STACK_DYNPTR] = 'd',
+ [STACK_ITER] = 'i',
+};
+
+static void print_liveness(struct bpf_verifier_env *env,
+ enum bpf_reg_liveness live)
+{
+ if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
+ verbose(env, "_");
+ if (live & REG_LIVE_READ)
+ verbose(env, "r");
+ if (live & REG_LIVE_WRITTEN)
+ verbose(env, "w");
+ if (live & REG_LIVE_DONE)
+ verbose(env, "D");
+}
+
+#define UNUM_MAX_DECIMAL U16_MAX
+#define SNUM_MAX_DECIMAL S16_MAX
+#define SNUM_MIN_DECIMAL S16_MIN
+
+static bool is_unum_decimal(u64 num)
+{
+ return num <= UNUM_MAX_DECIMAL;
+}
+
+static bool is_snum_decimal(s64 num)
+{
+ return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
+}
+
+static void verbose_unum(struct bpf_verifier_env *env, u64 num)
+{
+ if (is_unum_decimal(num))
+ verbose(env, "%llu", num);
+ else
+ verbose(env, "%#llx", num);
+}
+
+static void verbose_snum(struct bpf_verifier_env *env, s64 num)
+{
+ if (is_snum_decimal(num))
+ verbose(env, "%lld", num);
+ else
+ verbose(env, "%#llx", num);
+}
+
+int tnum_strn(char *str, size_t size, struct tnum a)
+{
+ /* print as a constant, if tnum is fully known */
+ if (a.mask == 0) {
+ if (is_unum_decimal(a.value))
+ return snprintf(str, size, "%llu", a.value);
+ else
+ return snprintf(str, size, "%#llx", a.value);
+ }
+ return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
+}
+EXPORT_SYMBOL_GPL(tnum_strn);
+
+static void print_scalar_ranges(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg,
+ const char **sep)
+{
+ /* For signed ranges, we want to unify 64-bit and 32-bit values in the
+ * output as much as possible, but there is a bit of a complication.
+ * If we choose to print values as decimals, this is natural to do,
+ * because negative 64-bit and 32-bit values >= -S32_MIN have the same
+ * representation due to sign extension. But if we choose to print
+ * them in hex format (see is_snum_decimal()), then sign extension is
+ * misleading.
+ * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
+ * hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
+ * very different numbers.
+ * So we avoid sign extension if we choose to print values in hex.
+ */
+ struct {
+ const char *name;
+ u64 val;
+ bool omit;
+ } minmaxs[] = {
+ {"smin", reg->smin_value, reg->smin_value == S64_MIN},
+ {"smax", reg->smax_value, reg->smax_value == S64_MAX},
+ {"umin", reg->umin_value, reg->umin_value == 0},
+ {"umax", reg->umax_value, reg->umax_value == U64_MAX},
+ {"smin32",
+ is_snum_decimal((s64)reg->s32_min_value)
+ ? (s64)reg->s32_min_value
+ : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
+ {"smax32",
+ is_snum_decimal((s64)reg->s32_max_value)
+ ? (s64)reg->s32_max_value
+ : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
+ {"umin32", reg->u32_min_value, reg->u32_min_value == 0},
+ {"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
+ }, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
+ bool neg1, neg2;
+
+ for (m1 = &minmaxs[0]; m1 < mend; m1++) {
+ if (m1->omit)
+ continue;
+
+ neg1 = m1->name[0] == 's' && (s64)m1->val < 0;
+
+ verbose(env, "%s%s=", *sep, m1->name);
+ *sep = ",";
+
+ for (m2 = m1 + 2; m2 < mend; m2 += 2) {
+ if (m2->omit || m2->val != m1->val)
+ continue;
+ /* don't mix negatives with positives */
+ neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
+ if (neg2 != neg1)
+ continue;
+ m2->omit = true;
+ verbose(env, "%s=", m2->name);
+ }
+
+ if (m1->name[0] == 's')
+ verbose_snum(env, m1->val);
+ else
+ verbose_unum(env, m1->val);
+ }
+}
+
+static bool type_is_map_ptr(enum bpf_reg_type t) {
+ switch (base_type(t)) {
+ case CONST_PTR_TO_MAP:
+ case PTR_TO_MAP_KEY:
+ case PTR_TO_MAP_VALUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * _a stands for append, was shortened to avoid multiline statements below.
+ * This macro is used to output a comma separated list of attributes.
+ */
+#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
+
+static void print_reg_state(struct bpf_verifier_env *env,
+ const struct bpf_func_state *state,
+ const struct bpf_reg_state *reg)
+{
+ enum bpf_reg_type t;
+ const char *sep = "";
+
+ t = reg->type;
+ if (t == SCALAR_VALUE && reg->precise)
+ verbose(env, "P");
+ if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
+ /* reg->off should be 0 for SCALAR_VALUE */
+ verbose_snum(env, reg->var_off.value + reg->off);
+ return;
+ }
+
+ verbose(env, "%s", reg_type_str(env, t));
+ if (t == PTR_TO_STACK) {
+ if (state->frameno != reg->frameno)
+ verbose(env, "[%d]", reg->frameno);
+ if (tnum_is_const(reg->var_off)) {
+ verbose_snum(env, reg->var_off.value + reg->off);
+ return;
+ }
+ }
+ if (base_type(t) == PTR_TO_BTF_ID)
+ verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
+ verbose(env, "(");
+ if (reg->id)
+ verbose_a("id=%d", reg->id);
+ if (reg->ref_obj_id)
+ verbose_a("ref_obj_id=%d", reg->ref_obj_id);
+ if (type_is_non_owning_ref(reg->type))
+ verbose_a("%s", "non_own_ref");
+ if (type_is_map_ptr(t)) {
+ if (reg->map_ptr->name[0])
+ verbose_a("map=%s", reg->map_ptr->name);
+ verbose_a("ks=%d,vs=%d",
+ reg->map_ptr->key_size,
+ reg->map_ptr->value_size);
+ }
+ if (t != SCALAR_VALUE && reg->off) {
+ verbose_a("off=");
+ verbose_snum(env, reg->off);
+ }
+ if (type_is_pkt_pointer(t)) {
+ verbose_a("r=");
+ verbose_unum(env, reg->range);
+ }
+ if (base_type(t) == PTR_TO_MEM) {
+ verbose_a("sz=");
+ verbose_unum(env, reg->mem_size);
+ }
+ if (t == CONST_PTR_TO_DYNPTR)
+ verbose_a("type=%s", dynptr_type_str(reg->dynptr.type));
+ if (tnum_is_const(reg->var_off)) {
+ /* a pointer register with fixed offset */
+ if (reg->var_off.value) {
+ verbose_a("imm=");
+ verbose_snum(env, reg->var_off.value);
+ }
+ } else {
+ print_scalar_ranges(env, reg, &sep);
+ if (!tnum_is_unknown(reg->var_off)) {
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+ verbose_a("var_off=%s", tn_buf);
+ }
+ }
+ verbose(env, ")");
+}
+
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
+ bool print_all)
+{
+ const struct bpf_reg_state *reg;
+ int i;
+
+ if (state->frameno)
+ verbose(env, " frame%d:", state->frameno);
+ for (i = 0; i < MAX_BPF_REG; i++) {
+ reg = &state->regs[i];
+ if (reg->type == NOT_INIT)
+ continue;
+ if (!print_all && !reg_scratched(env, i))
+ continue;
+ verbose(env, " R%d", i);
+ print_liveness(env, reg->live);
+ verbose(env, "=");
+ print_reg_state(env, state, reg);
+ }
+ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
+ char types_buf[BPF_REG_SIZE + 1];
+ const char *sep = "";
+ bool valid = false;
+ u8 slot_type;
+ int j;
+
+ if (!print_all && !stack_slot_scratched(env, i))
+ continue;
+
+ for (j = 0; j < BPF_REG_SIZE; j++) {
+ slot_type = state->stack[i].slot_type[j];
+ if (slot_type != STACK_INVALID)
+ valid = true;
+ types_buf[j] = slot_type_char[slot_type];
+ }
+ types_buf[BPF_REG_SIZE] = 0;
+ if (!valid)
+ continue;
+
+ reg = &state->stack[i].spilled_ptr;
+ switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
+ case STACK_SPILL:
+ /* print MISC/ZERO/INVALID slots above subreg spill */
+ for (j = 0; j < BPF_REG_SIZE; j++)
+ if (state->stack[i].slot_type[j] == STACK_SPILL)
+ break;
+ types_buf[j] = '\0';
+
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, reg->live);
+ verbose(env, "=%s", types_buf);
+ print_reg_state(env, state, reg);
+ break;
+ case STACK_DYNPTR:
+ /* skip to main dynptr slot */
+ i += BPF_DYNPTR_NR_SLOTS - 1;
+ reg = &state->stack[i].spilled_ptr;
+
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, reg->live);
+ verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
+ if (reg->id)
+ verbose_a("id=%d", reg->id);
+ if (reg->ref_obj_id)
+ verbose_a("ref_id=%d", reg->ref_obj_id);
+ if (reg->dynptr_id)
+ verbose_a("dynptr_id=%d", reg->dynptr_id);
+ verbose(env, ")");
+ break;
+ case STACK_ITER:
+ /* only main slot has ref_obj_id set; skip others */
+ if (!reg->ref_obj_id)
+ continue;
+
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, reg->live);
+ verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
+ iter_type_str(reg->iter.btf, reg->iter.btf_id),
+ reg->ref_obj_id, iter_state_str(reg->iter.state),
+ reg->iter.depth);
+ break;
+ case STACK_MISC:
+ case STACK_ZERO:
+ default:
+ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
+ print_liveness(env, reg->live);
+ verbose(env, "=%s", types_buf);
+ break;
+ }
+ }
+ if (state->acquired_refs && state->refs[0].id) {
+ verbose(env, " refs=%d", state->refs[0].id);
+ for (i = 1; i < state->acquired_refs; i++)
+ if (state->refs[i].id)
+ verbose(env, ",%d", state->refs[i].id);
+ }
+ if (state->in_callback_fn)
+ verbose(env, " cb");
+ if (state->in_async_callback_fn)
+ verbose(env, " async_cb");
+ verbose(env, "\n");
+ if (!print_all)
+ mark_verifier_state_clean(env);
+}
+
+static inline u32 vlog_alignment(u32 pos)
+{
+ return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
+ BPF_LOG_MIN_ALIGNMENT) - pos - 1;
+}
+
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
+{
+ if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
+ /* remove new line character */
+ bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
+ verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
+ } else {
+ verbose(env, "%d:", env->insn_idx);
+ }
+ print_verifier_state(env, state, false);
+}
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 17c7e7782a1f..b32be680da6c 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -231,6 +231,9 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key)
struct lpm_trie_node *node, *found = NULL;
struct bpf_lpm_trie_key *key = _key;
+ if (key->prefixlen > trie->max_prefixlen)
+ return NULL;
+
/* Start walking the trie from the root node ... */
for (node = rcu_dereference_check(trie->root, rcu_read_lock_bh_held());
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index cd5eafaba97e..8ef269e66ba5 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -127,12 +127,21 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
return inner_map;
}
-void bpf_map_fd_put_ptr(void *ptr)
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
- /* ptr->ops->map_free() has to go through one
- * rcu grace period by itself.
+ struct bpf_map *inner_map = ptr;
+
+ /* Defer the freeing of inner map according to the sleepable attribute
+ * of bpf program which owns the outer map, so unnecessary waiting for
+ * RCU tasks trace grace period can be avoided.
*/
- bpf_map_put(ptr);
+ if (need_defer) {
+ if (atomic64_read(&map->sleepable_refcnt))
+ WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
+ else
+ WRITE_ONCE(inner_map->free_after_rcu_gp, true);
+ }
+ bpf_map_put(inner_map);
}
u32 bpf_map_fd_sys_lookup_elem(void *ptr)
diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
index bcb7534afb3c..7d61602354de 100644
--- a/kernel/bpf/map_in_map.h
+++ b/kernel/bpf/map_in_map.h
@@ -13,7 +13,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
void bpf_map_meta_free(struct bpf_map *map_meta);
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
int ufd);
-void bpf_map_fd_put_ptr(void *ptr);
+void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer);
u32 bpf_map_fd_sys_lookup_elem(void *ptr);
#endif
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 6a51cfe4c2d6..550f02e2cb13 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -121,6 +121,8 @@ struct bpf_mem_caches {
struct bpf_mem_cache cache[NUM_CACHES];
};
+static const u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
struct llist_node *entry, *next;
@@ -462,11 +464,17 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c)
* consume ~ 11 Kbyte per cpu.
* Typical case will be between 11K and 116K closer to 11K.
* bpf progs can and should share bpf_mem_cache when possible.
+ *
+ * Percpu allocation is typically rare. To avoid potential unnecessary large
+ * memory consumption, set low_mark = 1 and high_mark = 3, resulting in c->batch = 1.
*/
static void init_refill_work(struct bpf_mem_cache *c)
{
init_irq_work(&c->refill_work, bpf_mem_refill);
- if (c->unit_size <= 256) {
+ if (c->percpu_size) {
+ c->low_watermark = 1;
+ c->high_watermark = 3;
+ } else if (c->unit_size <= 256) {
c->low_watermark = 32;
c->high_watermark = 96;
} else {
@@ -483,32 +491,16 @@ static void init_refill_work(struct bpf_mem_cache *c)
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
- /* To avoid consuming memory assume that 1st run of bpf
- * prog won't be doing more than 4 map_update_elem from
- * irq disabled region
- */
- alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
-}
-
-static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
-{
- struct llist_node *first;
- unsigned int obj_size;
+ int cnt = 1;
- first = c->free_llist.first;
- if (!first)
- return 0;
-
- if (c->percpu_size)
- obj_size = pcpu_alloc_size(((void **)first)[1]);
- else
- obj_size = ksize(first);
- if (obj_size != c->unit_size) {
- WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
- idx, c->percpu_size, obj_size, c->unit_size);
- return -EINVAL;
- }
- return 0;
+ /* To avoid consuming memory, for non-percpu allocation, assume that
+ * 1st run of bpf prog won't be doing more than 4 map_update_elem from
+	 * an irq-disabled region if the unit size is less than or equal to 256.
+ * For all other cases, let us just do one allocation.
+ */
+ if (!c->percpu_size && c->unit_size <= 256)
+ cnt = 4;
+ alloc_bulk(c, cnt, cpu_to_node(cpu), false);
}
/* When size != 0 bpf_mem_cache for each cpu.
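A standalone sketch of the watermark and prefill policy these two hunks establish; the toy_* structure is hypothetical, the numeric marks come from the diff, and the branch for units larger than 256 bytes is deliberately left unmodeled because the patch does not touch it:

/* Standalone sketch (not kernel code): the watermark / prefill policy chosen
 * above, expressed as a plain function.
 */
#include <stdio.h>

struct toy_cache {
	int unit_size;
	int percpu_size;	/* non-zero => per-cpu allocation cache */
	int low_watermark;
	int high_watermark;
	int prefill_cnt;
};

static void toy_tune(struct toy_cache *c)
{
	if (c->percpu_size) {
		/* per-cpu allocations are rare: keep at most a few objects cached */
		c->low_watermark = 1;
		c->high_watermark = 3;
	} else if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	}
	/* (the >256-byte branch is unchanged by the patch and not modeled here) */

	/* prefill: 4 small non-percpu objects, otherwise just 1 */
	c->prefill_cnt = (!c->percpu_size && c->unit_size <= 256) ? 4 : 1;
}

int main(void)
{
	struct toy_cache pc = { .unit_size = 64, .percpu_size = 16 };
	struct toy_cache kc = { .unit_size = 64, .percpu_size = 0 };

	toy_tune(&pc);
	toy_tune(&kc);
	printf("percpu : low=%d high=%d prefill=%d\n", pc.low_watermark, pc.high_watermark, pc.prefill_cnt);
	printf("kmalloc: low=%d high=%d prefill=%d\n", kc.low_watermark, kc.high_watermark, kc.prefill_cnt);
	return 0;
}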
@@ -520,11 +512,13 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
*/
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
- static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
- int cpu, i, err, unit_size, percpu_size = 0;
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
+ int cpu, i, unit_size, percpu_size = 0;
+
+ if (percpu && size == 0)
+ return -EINVAL;
/* room for llist_node and per-cpu pointer */
if (percpu)
@@ -544,6 +538,8 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
if (memcg_bpf_enabled())
objcg = get_obj_cgroup_from_current();
#endif
+ ma->objcg = objcg;
+
for_each_possible_cpu(cpu) {
c = per_cpu_ptr(pc, cpu);
c->unit_size = unit_size;
@@ -560,10 +556,10 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
if (!pcc)
return -ENOMEM;
- err = 0;
#ifdef CONFIG_MEMCG_KMEM
objcg = get_obj_cgroup_from_current();
#endif
+ ma->objcg = objcg;
for_each_possible_cpu(cpu) {
cc = per_cpu_ptr(pcc, cpu);
for (i = 0; i < NUM_CACHES; i++) {
@@ -574,28 +570,62 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->tgt = c;
init_refill_work(c);
- /* Another bpf_mem_cache will be used when allocating
- * c->unit_size in bpf_mem_alloc(), so doesn't prefill
- * for the bpf_mem_cache because these free objects will
- * never be used.
- */
- if (i != bpf_mem_cache_idx(c->unit_size))
- continue;
prefill_mem_cache(c, cpu);
- err = check_obj_size(c, i);
- if (err)
- goto out;
}
}
-out:
ma->caches = pcc;
- /* refill_work is either zeroed or initialized, so it is safe to
- * call irq_work_sync().
- */
- if (err)
- bpf_mem_alloc_destroy(ma);
- return err;
+ return 0;
+}
+
+int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg)
+{
+ struct bpf_mem_caches __percpu *pcc;
+
+ pcc = __alloc_percpu_gfp(sizeof(struct bpf_mem_caches), 8, GFP_KERNEL);
+ if (!pcc)
+ return -ENOMEM;
+
+ ma->caches = pcc;
+ ma->objcg = objcg;
+ ma->percpu = true;
+ return 0;
+}
+
+int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
+{
+ struct bpf_mem_caches *cc, __percpu *pcc;
+ int cpu, i, unit_size, percpu_size;
+ struct obj_cgroup *objcg;
+ struct bpf_mem_cache *c;
+
+ i = bpf_mem_cache_idx(size);
+ if (i < 0)
+ return -EINVAL;
+
+ /* room for llist_node and per-cpu pointer */
+ percpu_size = LLIST_NODE_SZ + sizeof(void *);
+
+ unit_size = sizes[i];
+ objcg = ma->objcg;
+ pcc = ma->caches;
+
+ for_each_possible_cpu(cpu) {
+ cc = per_cpu_ptr(pcc, cpu);
+ c = &cc->cache[i];
+ if (c->unit_size)
+ break;
+
+ c->unit_size = unit_size;
+ c->objcg = objcg;
+ c->percpu_size = percpu_size;
+ c->tgt = c;
+
+ init_refill_work(c);
+ prefill_mem_cache(c, cpu);
+ }
+
+ return 0;
}
static void drain_mem_cache(struct bpf_mem_cache *c)
@@ -729,9 +759,8 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
}
- /* objcg is the same across cpus */
- if (c->objcg)
- obj_cgroup_put(c->objcg);
+ if (ma->objcg)
+ obj_cgroup_put(ma->objcg);
destroy_mem_alloc(ma, rcu_in_progress);
}
if (ma->caches) {
@@ -747,8 +776,8 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
}
}
- if (c->objcg)
- obj_cgroup_put(c->objcg);
+ if (ma->objcg)
+ obj_cgroup_put(ma->objcg);
destroy_mem_alloc(ma, rcu_in_progress);
}
}
@@ -869,9 +898,11 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
void *ret;
if (!size)
- return ZERO_SIZE_PTR;
+ return NULL;
- idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
+ if (!ma->percpu)
+ size += LLIST_NODE_SZ;
+ idx = bpf_mem_cache_idx(size);
if (idx < 0)
return NULL;
@@ -879,26 +910,17 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
-static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
-{
- size_t size;
-
- if (percpu)
- size = pcpu_alloc_size(*((void **)ptr));
- else
- size = ksize(ptr - LLIST_NODE_SZ);
- return bpf_mem_cache_idx(size);
-}
-
void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
+ struct bpf_mem_cache *c;
int idx;
if (!ptr)
return;
- idx = bpf_mem_free_idx(ptr, ma->percpu);
- if (idx < 0)
+ c = *(void **)(ptr - LLIST_NODE_SZ);
+ idx = bpf_mem_cache_idx(c->unit_size);
+ if (WARN_ON_ONCE(idx < 0))
return;
unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
@@ -906,13 +928,15 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
+ struct bpf_mem_cache *c;
int idx;
if (!ptr)
return;
- idx = bpf_mem_free_idx(ptr, ma->percpu);
- if (idx < 0)
+ c = *(void **)(ptr - LLIST_NODE_SZ);
+ idx = bpf_mem_cache_idx(c->unit_size);
+ if (WARN_ON_ONCE(idx < 0))
return;
unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
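A standalone sketch of why the free path can now recover the owning cache from the hidden header in front of the object instead of querying ksize()/pcpu_alloc_size(); the layout and toy_* names are illustrative assumptions:

/* Standalone sketch (not kernel code): store a pointer to the owning cache
 * right before the returned object, and read it back on free.
 */
#include <stdio.h>
#include <stdlib.h>

#define HDR_SZ sizeof(void *)	/* stands in for LLIST_NODE_SZ */

struct toy_cache { int unit_size; };

/* alloc: hide the cache pointer in the header in front of the object */
static void *toy_alloc(struct toy_cache *c)
{
	void **mem = malloc(HDR_SZ + c->unit_size);

	if (!mem)
		return NULL;
	mem[0] = c;			/* hidden header */
	return (char *)mem + HDR_SZ;
}

/* free: read the header back to find which cache the object belongs to */
static void toy_free(void *ptr)
{
	struct toy_cache *c = *(struct toy_cache **)((char *)ptr - HDR_SZ);

	printf("returning object to cache with unit_size=%d\n", c->unit_size);
	free((char *)ptr - HDR_SZ);
}

int main(void)
{
	struct toy_cache c = { .unit_size = 96 };
	void *obj = toy_alloc(&c);

	if (obj)
		toy_free(obj);
	return 0;
}

Keeping the cache pointer with the object makes the free path independent of the allocator's size bookkeeping, which is what lets bpf_mem_free() and bpf_mem_free_rcu() above drop bpf_mem_free_idx() entirely.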
@@ -986,41 +1010,3 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
-
-/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
- * actual size of dynamic per-cpu area will always be matched and there is
- * no need to adjust size_index for per-cpu allocation. However for the
- * simplicity of the implementation, use an unified size_index for both
- * kmalloc and per-cpu allocation.
- */
-static __init int bpf_mem_cache_adjust_size(void)
-{
- unsigned int size;
-
- /* Adjusting the indexes in size_index() according to the object_size
- * of underlying slab cache, so bpf_mem_alloc() will select a
- * bpf_mem_cache with unit_size equal to the object_size of
- * the underlying slab cache.
- *
- * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
- * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
- */
- for (size = 192; size >= 8; size -= 8) {
- unsigned int kmalloc_size, index;
-
- kmalloc_size = kmalloc_size_roundup(size);
- if (kmalloc_size == size)
- continue;
-
- if (kmalloc_size <= 192)
- index = size_index[(kmalloc_size - 1) / 8];
- else
- index = fls(kmalloc_size - 1) - 1;
- /* Only overwrite if necessary */
- if (size_index[(size - 1) / 8] != index)
- size_index[(size - 1) / 8] = index;
- }
-
- return 0;
-}
-subsys_initcall(bpf_mem_cache_adjust_size);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index d6b277482085..dff7ba539701 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -388,6 +388,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
{
u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+ bool crosstask = task && task != current;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
bool user = flags & BPF_F_USER_STACK;
struct perf_callchain_entry *trace;
@@ -410,6 +411,14 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
if (task && user && !user_mode(regs))
goto err_fault;
+ /* get_perf_callchain does not support crosstask user stack walking
+ * but returns an empty stack instead of NULL.
+ */
+ if (crosstask && user) {
+ err = -EOPNOTSUPP;
+ goto clear;
+ }
+
num_elem = size / elem_size;
max_depth = num_elem + skip;
if (sysctl_perf_event_max_stack < max_depth)
@@ -421,7 +430,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
trace = get_callchain_entry_for_task(task, max_depth);
else
trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
- false, false);
+ crosstask, false);
if (unlikely(!trace))
goto err_fault;
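A standalone sketch of the new cross-task check, reduced to a pure decision function; the TOY_F_USER_STACK value and toy_* names are invented for the example and are not uapi:

/* Standalone sketch (not kernel code): reject walking another task's user
 * stack instead of silently returning an empty stack.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_F_USER_STACK 0x100

static int toy_get_stack_check(bool have_task, bool task_is_current, unsigned long flags)
{
	bool user = flags & TOY_F_USER_STACK;
	bool crosstask = have_task && !task_is_current;

	/* get_perf_callchain() cannot walk another task's user stack; it would
	 * return an empty stack, so reject the combination explicitly.
	 */
	if (crosstask && user)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	printf("current+user : %d\n", toy_get_stack_check(true, true, TOY_F_USER_STACK));
	printf("other+user   : %d\n", toy_get_stack_check(true, false, TOY_F_USER_STACK));
	printf("other+kernel : %d\n", toy_get_stack_check(true, false, 0));
	return 0;
}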
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0ed286b8a0f0..a1f18681721c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -142,9 +142,13 @@ static u32 bpf_map_value_size(const struct bpf_map *map)
static void maybe_wait_bpf_programs(struct bpf_map *map)
{
- /* Wait for any running BPF programs to complete so that
- * userspace, when we return to it, knows that all programs
- * that could be running use the new map value.
+ /* Wait for any running non-sleepable BPF programs to complete so that
+ * userspace, when we return to it, knows that all non-sleepable
+ * programs that could be running use the new map value. For sleepable
+ * BPF programs, synchronize_rcu_tasks_trace() should be used to wait
+	 * for the completion of these programs, but the waiting time can be
+	 * very long and userspace may think the syscall is hanging forever,
+	 * so don't handle sleepable BPF programs for now.
*/
if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
@@ -180,15 +184,11 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
err = bpf_percpu_cgroup_storage_update(map, key, value,
flags);
} else if (IS_FD_ARRAY(map)) {
- rcu_read_lock();
err = bpf_fd_array_map_update_elem(map, map_file, key, value,
flags);
- rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
- rcu_read_lock();
err = bpf_fd_htab_map_update_elem(map, map_file, key, value,
flags);
- rcu_read_unlock();
} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
/* rcu_read_lock() is not needed */
err = bpf_fd_reuseport_array_update_elem(map, key, value,
@@ -203,7 +203,6 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
rcu_read_unlock();
}
bpf_enable_instrumentation();
- maybe_wait_bpf_programs(map);
return err;
}
@@ -264,7 +263,6 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
}
bpf_enable_instrumentation();
- maybe_wait_bpf_programs(map);
return err;
}
@@ -694,6 +692,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
{
struct bpf_map *map = container_of(work, struct bpf_map, work);
struct btf_record *rec = map->record;
+ struct btf *btf = map->btf;
security_bpf_map_free(map);
bpf_map_release_memcg(map);
@@ -709,6 +708,10 @@ static void bpf_map_free_deferred(struct work_struct *work)
* template bpf_map struct used during verification.
*/
btf_record_free(rec);
+ /* Delay freeing of btf for maps, as map_free callback may need
+ * struct_meta info which will be freed with btf_put().
+ */
+ btf_put(btf);
}
static void bpf_map_put_uref(struct bpf_map *map)
@@ -719,6 +722,28 @@ static void bpf_map_put_uref(struct bpf_map *map)
}
}
+static void bpf_map_free_in_work(struct bpf_map *map)
+{
+ INIT_WORK(&map->work, bpf_map_free_deferred);
+ /* Avoid spawning kworkers, since they all might contend
+ * for the same mutex like slab_mutex.
+ */
+ queue_work(system_unbound_wq, &map->work);
+}
+
+static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
+{
+ bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
+}
+
+static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
+{
+ if (rcu_trace_implies_rcu_gp())
+ bpf_map_free_rcu_gp(rcu);
+ else
+ call_rcu(rcu, bpf_map_free_rcu_gp);
+}
+
/* decrement map refcnt and schedule it for freeing via workqueue
* (underlying map implementation ops->map_free() might sleep)
*/
@@ -727,12 +752,14 @@ void bpf_map_put(struct bpf_map *map)
if (atomic64_dec_and_test(&map->refcnt)) {
/* bpf_map_free_id() must be called first */
bpf_map_free_id(map);
- btf_put(map->btf);
- INIT_WORK(&map->work, bpf_map_free_deferred);
- /* Avoid spawning kworkers, since they all might contend
- * for the same mutex like slab_mutex.
- */
- queue_work(system_unbound_wq, &map->work);
+
+ WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
+ if (READ_ONCE(map->free_after_mult_rcu_gp))
+ call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
+ else if (READ_ONCE(map->free_after_rcu_gp))
+ call_rcu(&map->rcu, bpf_map_free_rcu_gp);
+ else
+ bpf_map_free_in_work(map);
}
}
EXPORT_SYMBOL_GPL(bpf_map_put);
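A standalone sketch of the three free paths bpf_map_put() now chooses between; the toy_* names are illustrative and the RCU machinery itself is of course not modeled:

/* Standalone sketch (not kernel code): the free-path decision made above. */
#include <stdbool.h>
#include <stdio.h>

enum toy_free_path {
	FREE_IN_WORKQUEUE,		/* no RCU wait, straight to deferred work          */
	FREE_AFTER_RCU_GP,		/* one regular RCU grace period first              */
	FREE_AFTER_MULT_RCU_GP,		/* RCU tasks trace GP, then a regular RCU GP
					 * (skipped when tasks trace implies regular RCU) */
};

static enum toy_free_path toy_choose(bool after_mult_rcu_gp, bool after_rcu_gp)
{
	if (after_mult_rcu_gp)
		return FREE_AFTER_MULT_RCU_GP;	/* sleepable progs may still hold it */
	if (after_rcu_gp)
		return FREE_AFTER_RCU_GP;	/* only non-sleepable progs involved */
	return FREE_IN_WORKQUEUE;
}

int main(void)
{
	printf("%d %d %d\n",
	       toy_choose(false, false),	/* 0: plain workqueue free        */
	       toy_choose(false, true),		/* 1: wait one RCU GP             */
	       toy_choose(true,  false));	/* 2: wait tasks-trace + RCU GP   */
	return 0;
}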
@@ -1524,6 +1551,8 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
}
err = bpf_map_update_value(map, f.file, key, value, attr->flags);
+ if (!err)
+ maybe_wait_bpf_programs(map);
kvfree(value);
free_key:
@@ -1579,7 +1608,8 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
bpf_enable_instrumentation();
- maybe_wait_bpf_programs(map);
+ if (!err)
+ maybe_wait_bpf_programs(map);
out:
kvfree(key);
err_put:
@@ -1676,6 +1706,9 @@ int generic_map_delete_batch(struct bpf_map *map,
if (!max_count)
return 0;
+ if (put_user(0, &uattr->batch.count))
+ return -EFAULT;
+
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
if (!key)
return -ENOMEM;
@@ -1705,7 +1738,6 @@ int generic_map_delete_batch(struct bpf_map *map,
kvfree(key);
- maybe_wait_bpf_programs(map);
return err;
}
@@ -1733,6 +1765,9 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
if (!max_count)
return 0;
+ if (put_user(0, &uattr->batch.count))
+ return -EFAULT;
+
key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
if (!key)
return -ENOMEM;
@@ -1763,6 +1798,7 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
kvfree(value);
kvfree(key);
+
return err;
}
@@ -2573,7 +2609,8 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
BPF_F_SLEEPABLE |
BPF_F_TEST_RND_HI32 |
BPF_F_XDP_HAS_FRAGS |
- BPF_F_XDP_DEV_BOUND_ONLY))
+ BPF_F_XDP_DEV_BOUND_ONLY |
+ BPF_F_TEST_REG_INVARIANTS))
return -EINVAL;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
@@ -2701,6 +2738,22 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
goto free_prog_sec;
}
+ /*
+ * Bookkeeping for managing the program attachment chain.
+ *
+	 * It might be tempting to set the attach_tracing_prog flag at attachment
+	 * time, but that would not prevent loading a bunch of tracing programs
+	 * first and then attaching them to one another.
+ *
+ * The flag attach_tracing_prog is set for the whole program lifecycle, and
+ * doesn't have to be cleared in bpf_tracing_link_release, since tracing
+ * programs cannot change attachment target.
+ */
+ if (type == BPF_PROG_TYPE_TRACING && dst_prog &&
+ dst_prog->type == BPF_PROG_TYPE_TRACING) {
+ prog->aux->attach_tracing_prog = true;
+ }
+
/* find program type: socket_filter vs tracing_filter */
err = find_prog_type(type, prog);
if (err < 0)
@@ -3134,7 +3187,12 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
}
if (tgt_prog_fd) {
- /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
+ /*
+ * For now we only allow new targets for BPF_PROG_TYPE_EXT. If this
+		 * part is ever changed to allow the same for
+		 * BPF_PROG_TYPE_TRACING, do not forget to update how the
+		 * attach_tracing_prog flag is set.
+ */
if (prog->type != BPF_PROG_TYPE_EXT) {
err = -EINVAL;
goto out_put_prog;
@@ -3179,6 +3237,10 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
*
* - if prog->aux->dst_trampoline and tgt_prog is NULL, the program
* was detached and is going for re-attachment.
+ *
+ * - if prog->aux->dst_trampoline is NULL and tgt_prog and prog->aux->attach_btf
+	 *   are NULL, then the program was already attached and the user did not
+	 *   provide tgt_prog_fd, so we have no way to find or create a trampoline
*/
if (!prog->aux->dst_trampoline && !tgt_prog) {
/*
@@ -3192,6 +3254,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
err = -EINVAL;
goto out_unlock;
}
+ /* We can allow re-attach only if we have valid attach_btf. */
+ if (!prog->aux->attach_btf) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
btf_id = prog->aux->attach_btf_id;
key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf, btf_id);
}
@@ -4919,8 +4986,10 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
else
BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr);
err_put:
- if (has_write)
+ if (has_write) {
+ maybe_wait_bpf_programs(map);
bpf_map_write_active_dec(map);
+ }
fdput(f);
return err;
}
@@ -5322,6 +5391,11 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
goto out_unlock;
}
+ /* The bpf program will not access the bpf map, but for the sake of
+	 * simplicity, increase sleepable_refcnt for sleepable programs as well.
+ */
+ if (prog->aux->sleepable)
+ atomic64_inc(&map->sleepable_refcnt);
memcpy(used_maps_new, used_maps_old,
sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
used_maps_new[prog->aux->used_map_cnt] = map;
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 26082b97894d..e5c3500443c6 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -70,15 +70,13 @@ static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_comm
return NULL;
retry:
- task = next_thread(task);
+ task = __next_thread(task);
+ if (!task)
+ return NULL;
next_tid = __task_pid_nr_ns(task, PIDTYPE_PID, common->ns);
- if (!next_tid || next_tid == common->pid) {
- /* Run out of tasks of a process. The tasks of a
- * thread_group are linked as circular linked list.
- */
- return NULL;
- }
+ if (!next_tid)
+ goto retry;
if (skip_if_dup_files && task->files == task->group_leader->files)
goto retry;
@@ -980,7 +978,6 @@ __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) !=
__alignof__(struct bpf_iter_task));
- kit->task = kit->pos = NULL;
switch (flags) {
case BPF_TASK_ITER_ALL_THREADS:
case BPF_TASK_ITER_ALL_PROCS:
@@ -1017,20 +1014,16 @@ __bpf_kfunc struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it)
if (flags == BPF_TASK_ITER_ALL_PROCS)
goto get_next_task;
- kit->pos = next_thread(kit->pos);
- if (kit->pos == kit->task) {
- if (flags == BPF_TASK_ITER_PROC_THREADS) {
- kit->pos = NULL;
- return pos;
- }
- } else
+ kit->pos = __next_thread(kit->pos);
+ if (kit->pos || flags == BPF_TASK_ITER_PROC_THREADS)
return pos;
get_next_task:
- kit->pos = next_task(kit->pos);
- kit->task = kit->pos;
- if (kit->pos == &init_task)
+ kit->task = next_task(kit->task);
+ if (kit->task == &init_task)
kit->pos = NULL;
+ else
+ kit->pos = kit->task;
return pos;
}
diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c
index 3d7127f439a1..9dbc31b25e3d 100644
--- a/kernel/bpf/tnum.c
+++ b/kernel/bpf/tnum.c
@@ -172,12 +172,6 @@ bool tnum_in(struct tnum a, struct tnum b)
return a.value == b.value;
}
-int tnum_strn(char *str, size_t size, struct tnum a)
-{
- return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
-}
-EXPORT_SYMBOL_GPL(tnum_strn);
-
int tnum_sbin(char *str, size_t size, struct tnum a)
{
size_t n;
@@ -208,7 +202,12 @@ struct tnum tnum_clear_subreg(struct tnum a)
return tnum_lshift(tnum_rshift(a, 32), 32);
}
+struct tnum tnum_with_subreg(struct tnum reg, struct tnum subreg)
+{
+ return tnum_or(tnum_clear_subreg(reg), tnum_subreg(subreg));
+}
+
struct tnum tnum_const_subreg(struct tnum a, u32 value)
{
- return tnum_or(tnum_clear_subreg(a), tnum_const(value));
+ return tnum_with_subreg(a, tnum_const(value));
}
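A standalone sketch of what tnum_with_subreg() computes, using a small local tnum type so the example is self-contained; toy_or() follows the usual known-bits OR and the toy_* names are not the kernel's:

/* Standalone sketch (not kernel code): splice the low 32 bits of 'subreg'
 * into 'reg' while keeping reg's upper 32 bits.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_tnum { uint64_t value; uint64_t mask; };	/* bits with mask==0 are known */

static struct toy_tnum toy_clear_subreg(struct toy_tnum a)
{
	/* keep upper 32 bits, force lower 32 bits to known-zero */
	return (struct toy_tnum){ a.value & ~0xffffffffULL, a.mask & ~0xffffffffULL };
}

static struct toy_tnum toy_subreg(struct toy_tnum a)
{
	/* keep only the lower 32 bits */
	return (struct toy_tnum){ a.value & 0xffffffffULL, a.mask & 0xffffffffULL };
}

static struct toy_tnum toy_or(struct toy_tnum a, struct toy_tnum b)
{
	uint64_t v = a.value | b.value;
	uint64_t mu = a.mask | b.mask;

	return (struct toy_tnum){ v & ~mu, mu };
}

static struct toy_tnum toy_with_subreg(struct toy_tnum reg, struct toy_tnum subreg)
{
	return toy_or(toy_clear_subreg(reg), toy_subreg(subreg));
}

int main(void)
{
	struct toy_tnum reg    = { 0x1234000000000000ULL, 0x00000000ffffffffULL };
	struct toy_tnum subreg = { 0x00000000000000aaULL, 0x0000000000000000ULL };
	struct toy_tnum res    = toy_with_subreg(reg, subreg);

	/* expect value=0x12340000000000aa mask=0 */
	printf("value=%#llx mask=%#llx\n",
	       (unsigned long long)res.value, (unsigned long long)res.mask);
	return 0;
}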
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index e97aeda3a86b..d382f5ebe06c 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -115,10 +115,10 @@ bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}
-void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
+void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym)
{
ksym->start = (unsigned long) data;
- ksym->end = ksym->start + PAGE_SIZE;
+ ksym->end = ksym->start + size;
bpf_ksym_add(ksym);
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
PAGE_SIZE, false, ksym->name);
@@ -254,8 +254,8 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_a
static void bpf_tramp_image_free(struct bpf_tramp_image *im)
{
bpf_image_ksym_del(&im->ksym);
- bpf_jit_free_exec(im->image);
- bpf_jit_uncharge_modmem(PAGE_SIZE);
+ arch_free_bpf_trampoline(im->image, im->size);
+ bpf_jit_uncharge_modmem(im->size);
percpu_ref_exit(&im->pcref);
kfree_rcu(im, rcu);
}
@@ -349,7 +349,7 @@ static void bpf_tramp_image_put(struct bpf_tramp_image *im)
call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}
-static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
+static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, int size)
{
struct bpf_tramp_image *im;
struct bpf_ksym *ksym;
@@ -360,15 +360,15 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
if (!im)
goto out;
- err = bpf_jit_charge_modmem(PAGE_SIZE);
+ err = bpf_jit_charge_modmem(size);
if (err)
goto out_free_im;
+ im->size = size;
err = -ENOMEM;
- im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
+ im->image = image = arch_alloc_bpf_trampoline(size);
if (!image)
goto out_uncharge;
- set_vm_flush_reset_perms(image);
err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
if (err)
@@ -377,13 +377,13 @@ static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
ksym = &im->ksym;
INIT_LIST_HEAD_RCU(&ksym->lnode);
snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
- bpf_image_ksym_add(image, ksym);
+ bpf_image_ksym_add(image, size, ksym);
return im;
out_free_image:
- bpf_jit_free_exec(im->image);
+ arch_free_bpf_trampoline(im->image, im->size);
out_uncharge:
- bpf_jit_uncharge_modmem(PAGE_SIZE);
+ bpf_jit_uncharge_modmem(size);
out_free_im:
kfree(im);
out:
@@ -396,7 +396,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
struct bpf_tramp_links *tlinks;
u32 orig_flags = tr->flags;
bool ip_arg = false;
- int err, total;
+ int err, total, size;
tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
if (IS_ERR(tlinks))
@@ -409,12 +409,6 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
goto out;
}
- im = bpf_tramp_image_alloc(tr->key);
- if (IS_ERR(im)) {
- err = PTR_ERR(im);
- goto out;
- }
-
/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
@@ -438,13 +432,31 @@ again:
tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif
- err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
+ size = arch_bpf_trampoline_size(&tr->func.model, tr->flags,
+ tlinks, tr->func.addr);
+ if (size < 0) {
+ err = size;
+ goto out;
+ }
+
+ if (size > PAGE_SIZE) {
+ err = -E2BIG;
+ goto out;
+ }
+
+ im = bpf_tramp_image_alloc(tr->key, size);
+ if (IS_ERR(im)) {
+ err = PTR_ERR(im);
+ goto out;
+ }
+
+ err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
&tr->func.model, tr->flags, tlinks,
tr->func.addr);
if (err < 0)
goto out_free;
- set_memory_rox((long)im->image, 1);
+ arch_protect_bpf_trampoline(im->image, im->size);
WARN_ON(tr->cur_image && total == 0);
if (tr->cur_image)
@@ -464,9 +476,8 @@ again:
tr->fops->func = NULL;
tr->fops->trampoline = 0;
- /* reset im->image memory attr for arch_prepare_bpf_trampoline */
- set_memory_nx((long)im->image, 1);
- set_memory_rw((long)im->image, 1);
+ /* free im memory and reallocate later */
+ bpf_tramp_image_free(im);
goto again;
}
#endif
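A standalone sketch of the reordered flow in bpf_trampoline_update(): size the trampoline first, reject anything above a page, and only then charge and allocate the image. The toy_* sizing formula is a stand-in, not the arch implementation:

/* Standalone sketch (not kernel code): size-first trampoline image setup. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096

static int toy_tramp_size(int nr_links)
{
	return 64 + 48 * nr_links;	/* stand-in for arch_bpf_trampoline_size() */
}

static int toy_update(int nr_links, void **image_out, int *size_out)
{
	int size = toy_tramp_size(nr_links);

	if (size < 0)
		return size;		/* arch could not size the trampoline */
	if (size > TOY_PAGE_SIZE)
		return -E2BIG;		/* reject before charging/allocating  */

	*image_out = malloc(size);	/* stands in for bpf_tramp_image_alloc(key, size) */
	if (!*image_out)
		return -ENOMEM;
	*size_out = size;
	return 0;
}

int main(void)
{
	void *image;
	int size, err;

	err = toy_update(4, &image, &size);
	if (!err) {
		printf("allocated %d-byte trampoline image\n", size);
		free(image);
	}
	return 0;
}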
@@ -1032,10 +1043,50 @@ bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
}
int __weak
-arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
+arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
- void *orig_call)
+ void *func_addr)
+{
+ return -ENOTSUPP;
+}
+
+void * __weak arch_alloc_bpf_trampoline(unsigned int size)
+{
+ void *image;
+
+ if (WARN_ON_ONCE(size > PAGE_SIZE))
+ return NULL;
+ image = bpf_jit_alloc_exec(PAGE_SIZE);
+ if (image)
+ set_vm_flush_reset_perms(image);
+ return image;
+}
+
+void __weak arch_free_bpf_trampoline(void *image, unsigned int size)
+{
+ WARN_ON_ONCE(size > PAGE_SIZE);
+ /* bpf_jit_free_exec doesn't need "size", but
+ * bpf_prog_pack_free() needs it.
+ */
+ bpf_jit_free_exec(image);
+}
+
+void __weak arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+ WARN_ON_ONCE(size > PAGE_SIZE);
+ set_memory_rox((long)image, 1);
+}
+
+void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size)
+{
+ WARN_ON_ONCE(size > PAGE_SIZE);
+ set_memory_nx((long)image, 1);
+ set_memory_rw((long)image, 1);
+}
+
+int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
{
return -ENOTSUPP;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index af2819d5c8ee..adbf330d364b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -195,6 +195,8 @@ struct bpf_verifier_stack_elem {
POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
+#define BPF_GLOBAL_PERCPU_MA_MAX_SIZE 512
+
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
@@ -339,30 +341,14 @@ struct bpf_kfunc_call_arg_meta {
struct btf *btf_vmlinux;
-static DEFINE_MUTEX(bpf_verifier_lock);
-static DEFINE_MUTEX(bpf_percpu_ma_lock);
-
-static const struct bpf_line_info *
-find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
+static const char *btf_type_name(const struct btf *btf, u32 id)
{
- const struct bpf_line_info *linfo;
- const struct bpf_prog *prog;
- u32 i, nr_linfo;
-
- prog = env->prog;
- nr_linfo = prog->aux->nr_linfo;
-
- if (!nr_linfo || insn_off >= prog->len)
- return NULL;
-
- linfo = prog->aux->linfo;
- for (i = 1; i < nr_linfo; i++)
- if (insn_off < linfo[i].insn_off)
- break;
-
- return &linfo[i - 1];
+ return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
+static DEFINE_MUTEX(bpf_verifier_lock);
+static DEFINE_MUTEX(bpf_percpu_ma_lock);
+
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
struct bpf_verifier_env *env = private_data;
@@ -376,73 +362,25 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
va_end(args);
}
-static const char *ltrim(const char *s)
-{
- while (isspace(*s))
- s++;
-
- return s;
-}
-
-__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
- u32 insn_off,
- const char *prefix_fmt, ...)
-{
- const struct bpf_line_info *linfo;
-
- if (!bpf_verifier_log_needed(&env->log))
- return;
-
- linfo = find_linfo(env, insn_off);
- if (!linfo || linfo == env->prev_linfo)
- return;
-
- if (prefix_fmt) {
- va_list args;
-
- va_start(args, prefix_fmt);
- bpf_verifier_vlog(&env->log, prefix_fmt, args);
- va_end(args);
- }
-
- verbose(env, "%s\n",
- ltrim(btf_name_by_offset(env->prog->aux->btf,
- linfo->line_off)));
-
- env->prev_linfo = linfo;
-}
-
static void verbose_invalid_scalar(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
- struct tnum *range, const char *ctx,
+ struct bpf_retval_range range, const char *ctx,
const char *reg_name)
{
- char tn_buf[48];
+ bool unknown = true;
- verbose(env, "At %s the register %s ", ctx, reg_name);
- if (!tnum_is_unknown(reg->var_off)) {
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "has value %s", tn_buf);
- } else {
- verbose(env, "has unknown scalar value");
+ verbose(env, "%s the register %s has", ctx, reg_name);
+ if (reg->smin_value > S64_MIN) {
+ verbose(env, " smin=%lld", reg->smin_value);
+ unknown = false;
}
- tnum_strn(tn_buf, sizeof(tn_buf), *range);
- verbose(env, " should have been in %s\n", tn_buf);
-}
-
-static bool type_is_pkt_pointer(enum bpf_reg_type type)
-{
- type = base_type(type);
- return type == PTR_TO_PACKET ||
- type == PTR_TO_PACKET_META;
-}
-
-static bool type_is_sk_pointer(enum bpf_reg_type type)
-{
- return type == PTR_TO_SOCKET ||
- type == PTR_TO_SOCK_COMMON ||
- type == PTR_TO_TCP_SOCK ||
- type == PTR_TO_XDP_SOCK;
+ if (reg->smax_value < S64_MAX) {
+ verbose(env, " smax=%lld", reg->smax_value);
+ unknown = false;
+ }
+ if (unknown)
+ verbose(env, " unknown scalar value");
+ verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval);
}
static bool type_may_be_null(u32 type)
@@ -468,16 +406,6 @@ static bool reg_not_null(const struct bpf_reg_state *reg)
type == PTR_TO_MEM;
}
-static bool type_is_ptr_alloc_obj(u32 type)
-{
- return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
-}
-
-static bool type_is_non_owning_ref(u32 type)
-{
- return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
-}
-
static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
struct btf_record *rec = NULL;
@@ -500,6 +428,31 @@ static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
}
+static const char *subprog_name(const struct bpf_verifier_env *env, int subprog)
+{
+ struct bpf_func_info *info;
+
+ if (!env->prog->aux->func_info)
+ return "";
+
+ info = &env->prog->aux->func_info[subprog];
+ return btf_type_name(env->prog->aux->btf, info->type_id);
+}
+
+static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog)
+{
+ struct bpf_subprog_info *info = subprog_info(env, subprog);
+
+ info->is_cb = true;
+ info->is_async_cb = true;
+ info->is_exception_cb = true;
+}
+
+static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog)
+{
+ return subprog_info(env, subprog)->is_exception_cb;
+}
+
static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
@@ -605,83 +558,6 @@ static bool is_cmpxchg_insn(const struct bpf_insn *insn)
insn->imm == BPF_CMPXCHG;
}
-/* string representation of 'enum bpf_reg_type'
- *
- * Note that reg_type_str() can not appear more than once in a single verbose()
- * statement.
- */
-static const char *reg_type_str(struct bpf_verifier_env *env,
- enum bpf_reg_type type)
-{
- char postfix[16] = {0}, prefix[64] = {0};
- static const char * const str[] = {
- [NOT_INIT] = "?",
- [SCALAR_VALUE] = "scalar",
- [PTR_TO_CTX] = "ctx",
- [CONST_PTR_TO_MAP] = "map_ptr",
- [PTR_TO_MAP_VALUE] = "map_value",
- [PTR_TO_STACK] = "fp",
- [PTR_TO_PACKET] = "pkt",
- [PTR_TO_PACKET_META] = "pkt_meta",
- [PTR_TO_PACKET_END] = "pkt_end",
- [PTR_TO_FLOW_KEYS] = "flow_keys",
- [PTR_TO_SOCKET] = "sock",
- [PTR_TO_SOCK_COMMON] = "sock_common",
- [PTR_TO_TCP_SOCK] = "tcp_sock",
- [PTR_TO_TP_BUFFER] = "tp_buffer",
- [PTR_TO_XDP_SOCK] = "xdp_sock",
- [PTR_TO_BTF_ID] = "ptr_",
- [PTR_TO_MEM] = "mem",
- [PTR_TO_BUF] = "buf",
- [PTR_TO_FUNC] = "func",
- [PTR_TO_MAP_KEY] = "map_key",
- [CONST_PTR_TO_DYNPTR] = "dynptr_ptr",
- };
-
- if (type & PTR_MAYBE_NULL) {
- if (base_type(type) == PTR_TO_BTF_ID)
- strncpy(postfix, "or_null_", 16);
- else
- strncpy(postfix, "_or_null", 16);
- }
-
- snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
- type & MEM_RDONLY ? "rdonly_" : "",
- type & MEM_RINGBUF ? "ringbuf_" : "",
- type & MEM_USER ? "user_" : "",
- type & MEM_PERCPU ? "percpu_" : "",
- type & MEM_RCU ? "rcu_" : "",
- type & PTR_UNTRUSTED ? "untrusted_" : "",
- type & PTR_TRUSTED ? "trusted_" : ""
- );
-
- snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
- prefix, str[base_type(type)], postfix);
- return env->tmp_str_buf;
-}
-
-static char slot_type_char[] = {
- [STACK_INVALID] = '?',
- [STACK_SPILL] = 'r',
- [STACK_MISC] = 'm',
- [STACK_ZERO] = '0',
- [STACK_DYNPTR] = 'd',
- [STACK_ITER] = 'i',
-};
-
-static void print_liveness(struct bpf_verifier_env *env,
- enum bpf_reg_liveness live)
-{
- if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
- verbose(env, "_");
- if (live & REG_LIVE_READ)
- verbose(env, "r");
- if (live & REG_LIVE_WRITTEN)
- verbose(env, "w");
- if (live & REG_LIVE_DONE)
- verbose(env, "D");
-}
-
static int __get_spi(s32 off)
{
return (-off - 1) / BPF_REG_SIZE;
@@ -746,92 +622,6 @@ static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
}
-static const char *btf_type_name(const struct btf *btf, u32 id)
-{
- return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
-}
-
-static const char *dynptr_type_str(enum bpf_dynptr_type type)
-{
- switch (type) {
- case BPF_DYNPTR_TYPE_LOCAL:
- return "local";
- case BPF_DYNPTR_TYPE_RINGBUF:
- return "ringbuf";
- case BPF_DYNPTR_TYPE_SKB:
- return "skb";
- case BPF_DYNPTR_TYPE_XDP:
- return "xdp";
- case BPF_DYNPTR_TYPE_INVALID:
- return "<invalid>";
- default:
- WARN_ONCE(1, "unknown dynptr type %d\n", type);
- return "<unknown>";
- }
-}
-
-static const char *iter_type_str(const struct btf *btf, u32 btf_id)
-{
- if (!btf || btf_id == 0)
- return "<invalid>";
-
- /* we already validated that type is valid and has conforming name */
- return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
-}
-
-static const char *iter_state_str(enum bpf_iter_state state)
-{
- switch (state) {
- case BPF_ITER_STATE_ACTIVE:
- return "active";
- case BPF_ITER_STATE_DRAINED:
- return "drained";
- case BPF_ITER_STATE_INVALID:
- return "<invalid>";
- default:
- WARN_ONCE(1, "unknown iter state %d\n", state);
- return "<unknown>";
- }
-}
-
-static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
-{
- env->scratched_regs |= 1U << regno;
-}
-
-static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
-{
- env->scratched_stack_slots |= 1ULL << spi;
-}
-
-static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
-{
- return (env->scratched_regs >> regno) & 1;
-}
-
-static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
-{
- return (env->scratched_stack_slots >> regno) & 1;
-}
-
-static bool verifier_state_scratched(const struct bpf_verifier_env *env)
-{
- return env->scratched_regs || env->scratched_stack_slots;
-}
-
-static void mark_verifier_state_clean(struct bpf_verifier_env *env)
-{
- env->scratched_regs = 0U;
- env->scratched_stack_slots = 0ULL;
-}
-
-/* Used for printing the entire verifier state. */
-static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
-{
- env->scratched_regs = ~0U;
- env->scratched_stack_slots = ~0ULL;
-}
-
static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
@@ -1365,230 +1155,25 @@ static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
stack->spilled_ptr.type == SCALAR_VALUE;
}
-static void scrub_spilled_slot(u8 *stype)
-{
- if (*stype != STACK_INVALID)
- *stype = STACK_MISC;
-}
-
-static void print_scalar_ranges(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg,
- const char **sep)
-{
- struct {
- const char *name;
- u64 val;
- bool omit;
- } minmaxs[] = {
- {"smin", reg->smin_value, reg->smin_value == S64_MIN},
- {"smax", reg->smax_value, reg->smax_value == S64_MAX},
- {"umin", reg->umin_value, reg->umin_value == 0},
- {"umax", reg->umax_value, reg->umax_value == U64_MAX},
- {"smin32", (s64)reg->s32_min_value, reg->s32_min_value == S32_MIN},
- {"smax32", (s64)reg->s32_max_value, reg->s32_max_value == S32_MAX},
- {"umin32", reg->u32_min_value, reg->u32_min_value == 0},
- {"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
- }, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
- bool neg1, neg2;
-
- for (m1 = &minmaxs[0]; m1 < mend; m1++) {
- if (m1->omit)
- continue;
-
- neg1 = m1->name[0] == 's' && (s64)m1->val < 0;
-
- verbose(env, "%s%s=", *sep, m1->name);
- *sep = ",";
-
- for (m2 = m1 + 2; m2 < mend; m2 += 2) {
- if (m2->omit || m2->val != m1->val)
- continue;
- /* don't mix negatives with positives */
- neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
- if (neg2 != neg1)
- continue;
- m2->omit = true;
- verbose(env, "%s=", m2->name);
- }
-
- verbose(env, m1->name[0] == 's' ? "%lld" : "%llu", m1->val);
- }
-}
-
-static void print_verifier_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state,
- bool print_all)
-{
- const struct bpf_reg_state *reg;
- enum bpf_reg_type t;
- int i;
-
- if (state->frameno)
- verbose(env, " frame%d:", state->frameno);
- for (i = 0; i < MAX_BPF_REG; i++) {
- reg = &state->regs[i];
- t = reg->type;
- if (t == NOT_INIT)
- continue;
- if (!print_all && !reg_scratched(env, i))
- continue;
- verbose(env, " R%d", i);
- print_liveness(env, reg->live);
- verbose(env, "=");
- if (t == SCALAR_VALUE && reg->precise)
- verbose(env, "P");
- if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
- tnum_is_const(reg->var_off)) {
- /* reg->off should be 0 for SCALAR_VALUE */
- verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
- verbose(env, "%lld", reg->var_off.value + reg->off);
- } else {
- const char *sep = "";
-
- verbose(env, "%s", reg_type_str(env, t));
- if (base_type(t) == PTR_TO_BTF_ID)
- verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
- verbose(env, "(");
-/*
- * _a stands for append, was shortened to avoid multiline statements below.
- * This macro is used to output a comma separated list of attributes.
+/* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which
+ * case they are equivalent, or it's STACK_ZERO, in which case we preserve
+ * more precise STACK_ZERO.
+ * Note, in unprivileged mode leaving STACK_INVALID is wrong, so we take
+ * env->allow_ptr_leaks into account and force STACK_MISC, if necessary.
*/
-#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
-
- if (reg->id)
- verbose_a("id=%d", reg->id);
- if (reg->ref_obj_id)
- verbose_a("ref_obj_id=%d", reg->ref_obj_id);
- if (type_is_non_owning_ref(reg->type))
- verbose_a("%s", "non_own_ref");
- if (t != SCALAR_VALUE)
- verbose_a("off=%d", reg->off);
- if (type_is_pkt_pointer(t))
- verbose_a("r=%d", reg->range);
- else if (base_type(t) == CONST_PTR_TO_MAP ||
- base_type(t) == PTR_TO_MAP_KEY ||
- base_type(t) == PTR_TO_MAP_VALUE)
- verbose_a("ks=%d,vs=%d",
- reg->map_ptr->key_size,
- reg->map_ptr->value_size);
- if (tnum_is_const(reg->var_off)) {
- /* Typically an immediate SCALAR_VALUE, but
- * could be a pointer whose offset is too big
- * for reg->off
- */
- verbose_a("imm=%llx", reg->var_off.value);
- } else {
- print_scalar_ranges(env, reg, &sep);
- if (!tnum_is_unknown(reg->var_off)) {
- char tn_buf[48];
-
- tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose_a("var_off=%s", tn_buf);
- }
- }
-#undef verbose_a
-
- verbose(env, ")");
- }
- }
- for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
- char types_buf[BPF_REG_SIZE + 1];
- bool valid = false;
- int j;
-
- for (j = 0; j < BPF_REG_SIZE; j++) {
- if (state->stack[i].slot_type[j] != STACK_INVALID)
- valid = true;
- types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
- }
- types_buf[BPF_REG_SIZE] = 0;
- if (!valid)
- continue;
- if (!print_all && !stack_slot_scratched(env, i))
- continue;
- switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
- case STACK_SPILL:
- reg = &state->stack[i].spilled_ptr;
- t = reg->type;
-
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
- if (t == SCALAR_VALUE && reg->precise)
- verbose(env, "P");
- if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
- verbose(env, "%lld", reg->var_off.value + reg->off);
- break;
- case STACK_DYNPTR:
- i += BPF_DYNPTR_NR_SLOTS - 1;
- reg = &state->stack[i].spilled_ptr;
-
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
- if (reg->ref_obj_id)
- verbose(env, "(ref_id=%d)", reg->ref_obj_id);
- break;
- case STACK_ITER:
- /* only main slot has ref_obj_id set; skip others */
- reg = &state->stack[i].spilled_ptr;
- if (!reg->ref_obj_id)
- continue;
-
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
- iter_type_str(reg->iter.btf, reg->iter.btf_id),
- reg->ref_obj_id, iter_state_str(reg->iter.state),
- reg->iter.depth);
- break;
- case STACK_MISC:
- case STACK_ZERO:
- default:
- reg = &state->stack[i].spilled_ptr;
-
- for (j = 0; j < BPF_REG_SIZE; j++)
- types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
- types_buf[BPF_REG_SIZE] = 0;
-
- verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
- print_liveness(env, reg->live);
- verbose(env, "=%s", types_buf);
- break;
- }
- }
- if (state->acquired_refs && state->refs[0].id) {
- verbose(env, " refs=%d", state->refs[0].id);
- for (i = 1; i < state->acquired_refs; i++)
- if (state->refs[i].id)
- verbose(env, ",%d", state->refs[i].id);
- }
- if (state->in_callback_fn)
- verbose(env, " cb");
- if (state->in_async_callback_fn)
- verbose(env, " async_cb");
- verbose(env, "\n");
- if (!print_all)
- mark_verifier_state_clean(env);
-}
-
-static inline u32 vlog_alignment(u32 pos)
+static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
{
- return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
- BPF_LOG_MIN_ALIGNMENT) - pos - 1;
+ if (*stype == STACK_ZERO)
+ return;
+ if (env->allow_ptr_leaks && *stype == STACK_INVALID)
+ return;
+ *stype = STACK_MISC;
}
-static void print_insn_state(struct bpf_verifier_env *env,
- const struct bpf_func_state *state)
+static void scrub_spilled_slot(u8 *stype)
{
- if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
- /* remove new line character */
- bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
- verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
- } else {
- verbose(env, "%d:", env->insn_idx);
- }
- print_verifier_state(env, state, false);
+ if (*stype != STACK_INVALID)
+ *stype = STACK_MISC;
}
/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
@@ -1685,9 +1270,16 @@ static int resize_reference_state(struct bpf_func_state *state, size_t n)
return 0;
}
-static int grow_stack_state(struct bpf_func_state *state, int size)
+/* Possibly update state->allocated_stack to be at least size bytes. Also
+ * possibly update the function's high-water mark in its bpf_subprog_info.
+ */
+static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size)
{
- size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
+ size_t old_n = state->allocated_stack / BPF_REG_SIZE, n;
+
+ /* The stack size is always a multiple of BPF_REG_SIZE. */
+ size = round_up(size, BPF_REG_SIZE);
+ n = size / BPF_REG_SIZE;
if (old_n >= n)
return 0;
@@ -1697,6 +1289,11 @@ static int grow_stack_state(struct bpf_func_state *state, int size)
return -ENOMEM;
state->allocated_stack = size;
+
+ /* update known max for given subprogram */
+ if (env->subprog_info[state->subprogno].stack_depth < size)
+ env->subprog_info[state->subprogno].stack_depth = size;
+
return 0;
}
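A standalone sketch of the rounding and high-water-mark tracking added to grow_stack_state(); the toy_* names and the request sizes are illustrative:

/* Standalone sketch (not kernel code): round requests up to the register size
 * and remember the per-subprog maximum.
 */
#include <stdio.h>

#define TOY_REG_SIZE 8

static int toy_round_up(int size)
{
	return (size + TOY_REG_SIZE - 1) / TOY_REG_SIZE * TOY_REG_SIZE;
}

int main(void)
{
	int allocated = 0, subprog_depth = 0;
	int requests[] = { 12, 40, 24 };

	for (int i = 0; i < 3; i++) {
		int size = toy_round_up(requests[i]);

		if (size > allocated)
			allocated = size;	/* grow the state's stack          */
		if (size > subprog_depth)
			subprog_depth = size;	/* remember max for this subprog   */
	}
	printf("allocated=%d subprog_depth=%d\n", allocated, subprog_depth);	/* 40 40 */
	return 0;
}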
@@ -1796,8 +1393,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
int i, err;
dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
- src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
- GFP_USER);
+ src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
+ GFP_USER);
if (!dst_state->jmp_history)
return -ENOMEM;
dst_state->jmp_history_cnt = src->jmp_history_cnt;
@@ -2172,10 +1769,14 @@ static void __mark_reg_known_zero(struct bpf_reg_state *reg)
__mark_reg_known(reg, 0);
}
-static void __mark_reg_const_zero(struct bpf_reg_state *reg)
+static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
__mark_reg_known(reg, 0);
reg->type = SCALAR_VALUE;
+ /* all scalars are assumed imprecise initially (unless unprivileged,
+ * in which case everything is forced to be precise)
+ */
+ reg->precise = !env->bpf_capable;
}
static void mark_reg_known_zero(struct bpf_verifier_env *env,
@@ -2341,69 +1942,214 @@ static void __update_reg_bounds(struct bpf_reg_state *reg)
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
- /* Learn sign from signed bounds.
- * If we cannot cross the sign boundary, then signed and unsigned bounds
- * are the same, so combine. This works even in the negative case, e.g.
- * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
+ /* If upper 32 bits of u64/s64 range don't change, we can use lower 32
+ * bits to improve our u32/s32 boundaries.
+ *
+ * E.g., the case where we have upper 32 bits as zero ([10, 20] in
+ * u64) is pretty trivial, it's obvious that in u32 we'll also have
+ * [10, 20] range. But this property holds for any 64-bit range as
+ * long as upper 32 bits in that entire range of values stay the same.
+ *
+ * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311]
+ * in decimal) has the same upper 32 bits throughout all the values in
+ * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15])
+ * range.
+ *
+ * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32,
+ * following the rules outlined below about u64/s64 correspondence
+ * (which equally applies to u32 vs s32 correspondence). In general it
+ * depends on actual hexadecimal values of 32-bit range. They can form
+ * only valid u32, or only valid s32 ranges in some cases.
+ *
+ * So we use all these insights to derive bounds for subregisters here.
*/
- if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
- reg->s32_min_value = reg->u32_min_value =
- max_t(u32, reg->s32_min_value, reg->u32_min_value);
- reg->s32_max_value = reg->u32_max_value =
- min_t(u32, reg->s32_max_value, reg->u32_max_value);
- return;
+ if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) {
+ /* u64 to u32 casting preserves validity of low 32 bits as
+ * a range, if upper 32 bits are the same
+ */
+ reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value);
+ reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value);
+
+ if ((s32)reg->umin_value <= (s32)reg->umax_value) {
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
+ }
+ }
+ if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) {
+ /* low 32 bits should form a proper u32 range */
+ if ((u32)reg->smin_value <= (u32)reg->smax_value) {
+ reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value);
+ reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value);
+ }
+ /* low 32 bits should form a proper s32 range */
+ if ((s32)reg->smin_value <= (s32)reg->smax_value) {
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
+ }
+ }
+ /* Special case where upper bits form a small sequence of two
+ * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
+ * 0x00000000 is also valid), while lower bits form a proper s32 range
+ * going from negative numbers to positive numbers. E.g., let's say we
+ * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]).
+ * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff,
+	 * 0x0000000000000000, 0x0000000000000001}). Ignoring upper 32 bits,
+ * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]).
+ * Note that it doesn't have to be 0xffffffff going to 0x00000000 in
+ * upper 32 bits. As a random example, s64 range
+ * [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range
+ * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister.
+ */
+ if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) &&
+ (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) {
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value);
+ }
+ if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) &&
+ (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) {
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value);
+ }
+ /* if u32 range forms a valid s32 range (due to matching sign bit),
+ * try to learn from that
+ */
+ if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) {
+ reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value);
+ reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value);
}
- /* Learn sign from unsigned bounds. Signed bounds cross the sign
- * boundary, so we must be careful.
+ /* If we cannot cross the sign boundary, then signed and unsigned bounds
+ * are the same, so combine. This works even in the negative case, e.g.
+ * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
- if ((s32)reg->u32_max_value >= 0) {
- /* Positive. We can't learn anything from the smin, but smax
- * is positive, hence safe.
- */
- reg->s32_min_value = reg->u32_min_value;
- reg->s32_max_value = reg->u32_max_value =
- min_t(u32, reg->s32_max_value, reg->u32_max_value);
- } else if ((s32)reg->u32_min_value < 0) {
- /* Negative. We can't learn anything from the smax, but smin
- * is negative, hence safe.
- */
- reg->s32_min_value = reg->u32_min_value =
- max_t(u32, reg->s32_min_value, reg->u32_min_value);
- reg->s32_max_value = reg->u32_max_value;
+ if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
+ reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value);
+ reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value);
}
}
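The first derivation above can be checked numerically with the example from the comment (u64 range [0x10000000A, 0x10000000F]); a standalone sketch, with nothing kernel-specific assumed:

/* Standalone sketch (not kernel code): if umin and umax share their upper
 * 32 bits, the low 32 bits form a valid u32 range.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t umin = 0x10000000AULL;		/* 4294967306 */
	uint64_t umax = 0x10000000FULL;		/* 4294967311 */

	if ((umin >> 32) == (umax >> 32)) {
		uint32_t u32_min = (uint32_t)umin;	/* 0xA */
		uint32_t u32_max = (uint32_t)umax;	/* 0xF */

		printf("u32 range derived: [%#x, %#x]\n", u32_min, u32_max);
	}
	return 0;
}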
static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
- /* Learn sign from signed bounds.
- * If we cannot cross the sign boundary, then signed and unsigned bounds
+ /* If u64 range forms a valid s64 range (due to matching sign bit),
+ * try to learn from that. Let's do a bit of ASCII art to see when
+ * this is happening. Let's take u64 range first:
+ *
+ * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
+ * |-------------------------------|--------------------------------|
+ *
+ * Valid u64 range is formed when umin and umax are anywhere in the
+ * range [0, U64_MAX], and umin <= umax. u64 case is simple and
+ * straightforward. Let's see how s64 range maps onto the same range
+ * of values, annotated below the line for comparison:
+ *
+ * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
+ * |-------------------------------|--------------------------------|
+ * 0 S64_MAX S64_MIN -1
+ *
+ * So s64 values basically start in the middle and they are logically
+ * contiguous to the right of it, wrapping around from -1 to 0, and
+ * then finishing as S64_MAX (0x7fffffffffffffff) right before
+ * S64_MIN. We can try drawing the continuity of u64 vs s64 values
+ * more visually as mapped to sign-agnostic range of hex values.
+ *
+ * u64 start u64 end
+ * _______________________________________________________________
+ * / \
+ * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX
+ * |-------------------------------|--------------------------------|
+ * 0 S64_MAX S64_MIN -1
+ * / \
+ * >------------------------------ ------------------------------->
+ * s64 continues... s64 end s64 start s64 "midpoint"
+ *
+ * What this means is that, in general, we can't always derive
+ * something new about u64 from any random s64 range, and vice versa.
+ *
+ * But we can do that in two particular cases. One is when entire
+ * u64/s64 range is *entirely* contained within left half of the above
+ * diagram or when it is *entirely* contained in the right half. I.e.:
+ *
+ * |-------------------------------|--------------------------------|
+ * ^ ^ ^ ^
+ * A B C D
+ *
+ * [A, B] and [C, D] are contained entirely in their respective halves
+ * and form valid contiguous ranges as both u64 and s64 values. [A, B]
+ * will be non-negative both as u64 and s64 (and in fact it will be
+ * identical ranges no matter the signedness). [C, D] treated as s64
+ * will be a range of negative values, while in u64 it will be
+ * non-negative range of values larger than 0x8000000000000000.
+ *
+ * Now, any other range here can't be represented in both u64 and s64
+ * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid
+ * contiguous u64 ranges, but they are discontinuous in s64. [B, C]
+ * in s64 would be properly presented as [S64_MIN, C] and [B, S64_MAX],
+ * for example. Similarly, valid s64 range [D, A] (going from negative
+ * to positive values), would be two separate [D, U64_MAX] and [0, A]
+ * ranges as u64. Currently reg_state can't represent two segments per
+ * numeric domain, so in such situations we can only derive maximal
+ * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64).
+ *
+ * So we use these facts to derive umin/umax from smin/smax and vice
+ * versa only if they stay within the same "half". This is equivalent
+ * to checking sign bit: lower half will have sign bit as zero, upper
+ * half have sign bit 1. Below in code we simplify this by just
+ * casting umin/umax as smin/smax and checking if they form valid
+ * range, and vice versa. Those are equivalent checks.
+ */
+ if ((s64)reg->umin_value <= (s64)reg->umax_value) {
+ reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value);
+ reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value);
+ }
+ /* If we cannot cross the sign boundary, then signed and unsigned bounds
* are the same, so combine. This works even in the negative case, e.g.
* -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
*/
- if (reg->smin_value >= 0 || reg->smax_value < 0) {
- reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
- reg->umin_value);
- reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
- reg->umax_value);
- return;
+ if ((u64)reg->smin_value <= (u64)reg->smax_value) {
+ reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value);
+ reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value);
}
- /* Learn sign from unsigned bounds. Signed bounds cross the sign
- * boundary, so we must be careful.
+}
+
+static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg)
+{
+ /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit
+	 * values on both sides of the 64-bit range in hope of getting a tighter range.
+ * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from
+ * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff].
+ * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound
+ * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of
+ * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a
+ * better overall bounds for r1 as [0x1'000000001; 0x3'7fffffff].
+	 * We just need to make sure that the derived bounds we are intersecting
+	 * with are well-formed ranges in the respective s64 or u64 domain, just
+	 * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments.
*/
- if ((s64)reg->umax_value >= 0) {
- /* Positive. We can't learn anything from the smin, but smax
- * is positive, hence safe.
- */
- reg->smin_value = reg->umin_value;
- reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
- reg->umax_value);
- } else if ((s64)reg->umin_value < 0) {
- /* Negative. We can't learn anything from the smax, but smin
- * is negative, hence safe.
- */
- reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
- reg->umin_value);
- reg->smax_value = reg->umax_value;
+ __u64 new_umin, new_umax;
+ __s64 new_smin, new_smax;
+
+ /* u32 -> u64 tightening, it's always well-formed */
+ new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value;
+ new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value;
+ reg->umin_value = max_t(u64, reg->umin_value, new_umin);
+ reg->umax_value = min_t(u64, reg->umax_value, new_umax);
+ /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */
+ new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value;
+ new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value;
+ reg->smin_value = max_t(s64, reg->smin_value, new_smin);
+ reg->smax_value = min_t(s64, reg->smax_value, new_smax);
+
+ /* if s32 can be treated as valid u32 range, we can use it as well */
+ if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) {
+ /* s32 -> u64 tightening */
+ new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
+ new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
+ reg->umin_value = max_t(u64, reg->umin_value, new_umin);
+ reg->umax_value = min_t(u64, reg->umax_value, new_umax);
+ /* s32 -> s64 tightening */
+ new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value;
+ new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value;
+ reg->smin_value = max_t(s64, reg->smin_value, new_smin);
+ reg->smax_value = min_t(s64, reg->smax_value, new_smax);
}
}
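/* Standalone sketch (not kernel code) of the u32 -> u64 low-bits substitution
 * described in the comment above, applied to the example register
 * r1 = [0x1'00000000, 0x3'80000000] with u32 bounds [1, 0x7fffffff]. Field
 * names mirror bpf_reg_state, but the struct here is a stripped-down stand-in.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_reg {
	uint64_t umin_value, umax_value;
	uint32_t u32_min_value, u32_max_value;
};

static void toy_deduce_mixed_bounds(struct toy_reg *r)
{
	/* splice 32-bit bounds into the low 32 bits of the 64-bit bounds */
	uint64_t new_umin = (r->umin_value & ~0xffffffffULL) | r->u32_min_value;
	uint64_t new_umax = (r->umax_value & ~0xffffffffULL) | r->u32_max_value;

	if (new_umin > r->umin_value)	/* max() */
		r->umin_value = new_umin;
	if (new_umax < r->umax_value)	/* min() */
		r->umax_value = new_umax;
}

int main(void)
{
	struct toy_reg r = {
		.umin_value = 0x100000000ULL, .umax_value = 0x380000000ULL,
		.u32_min_value = 1,           .u32_max_value = 0x7fffffff,
	};

	toy_deduce_mixed_bounds(&r);
	/* prints [0x100000001, 0x37fffffff], matching the comment's example */
	printf("[%#llx, %#llx]\n",
	       (unsigned long long)r.umin_value,
	       (unsigned long long)r.umax_value);
	return 0;
}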
@@ -2411,6 +2157,7 @@ static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
__reg32_deduce_bounds(reg);
__reg64_deduce_bounds(reg);
+ __reg_deduce_mixed_bounds(reg);
}
/* Attempts to improve var_off based on unsigned min/max information */
@@ -2432,6 +2179,7 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
__update_reg_bounds(reg);
/* We might have learned something about the sign bit. */
__reg_deduce_bounds(reg);
+ __reg_deduce_bounds(reg);
/* We might have learned some bits from the bounds. */
__reg_bound_offset(reg);
/* Intersecting with the old var_off might have improved our bounds
@@ -2441,6 +2189,56 @@ static void reg_bounds_sync(struct bpf_reg_state *reg)
__update_reg_bounds(reg);
}
+static int reg_bounds_sanity_check(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, const char *ctx)
+{
+ const char *msg;
+
+ if (reg->umin_value > reg->umax_value ||
+ reg->smin_value > reg->smax_value ||
+ reg->u32_min_value > reg->u32_max_value ||
+ reg->s32_min_value > reg->s32_max_value) {
+ msg = "range bounds violation";
+ goto out;
+ }
+
+ if (tnum_is_const(reg->var_off)) {
+ u64 uval = reg->var_off.value;
+ s64 sval = (s64)uval;
+
+ if (reg->umin_value != uval || reg->umax_value != uval ||
+ reg->smin_value != sval || reg->smax_value != sval) {
+ msg = "const tnum out of sync with range bounds";
+ goto out;
+ }
+ }
+
+ if (tnum_subreg_is_const(reg->var_off)) {
+ u32 uval32 = tnum_subreg(reg->var_off).value;
+ s32 sval32 = (s32)uval32;
+
+ if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 ||
+ reg->s32_min_value != sval32 || reg->s32_max_value != sval32) {
+ msg = "const subreg tnum out of sync with range bounds";
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ verbose(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] "
+ "s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n",
+ ctx, msg, reg->umin_value, reg->umax_value,
+ reg->smin_value, reg->smax_value,
+ reg->u32_min_value, reg->u32_max_value,
+ reg->s32_min_value, reg->s32_max_value,
+ reg->var_off.value, reg->var_off.mask);
+ if (env->test_reg_invariants)
+ return -EFAULT;
+ __mark_reg_unbounded(reg);
+ return 0;
+}
+
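/* Minimal sketch of the invariants enforced above, on a stripped-down register
 * (an illustration, not the kernel struct): every numeric domain must describe
 * a non-empty range, and a constant tnum must agree with all ranges. In the
 * verifier a violation either fails verification (test mode) or resets the
 * register to fully unknown.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_bounds {
	uint64_t umin, umax;
	int64_t  smin, smax;
	uint32_t u32min, u32max;
	int32_t  s32min, s32max;
	bool     is_const;	/* stands in for tnum_is_const(var_off) */
	uint64_t const_val;	/* stands in for var_off.value */
};

static bool toy_bounds_are_sane(const struct toy_bounds *b)
{
	if (b->umin > b->umax || b->smin > b->smax ||
	    b->u32min > b->u32max || b->s32min > b->s32max)
		return false;	/* range bounds violation */

	if (b->is_const &&
	    (b->umin != b->const_val || b->umax != b->const_val ||
	     b->smin != (int64_t)b->const_val || b->smax != (int64_t)b->const_val))
		return false;	/* const tnum out of sync with range bounds */

	return true;
}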
static bool __reg32_bound_s64(s32 a)
{
return a >= 0 && a <= S32_MAX;
@@ -2465,51 +2263,6 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
}
}
-static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
-{
- /* special case when 64-bit register has upper 32-bit register
- * zeroed. Typically happens after zext or <<32, >>32 sequence
- * allowing us to use 32-bit bounds directly,
- */
- if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
- __reg_assign_32_into_64(reg);
- } else {
- /* Otherwise the best we can do is push lower 32bit known and
- * unknown bits into register (var_off set from jmp logic)
- * then learn as much as possible from the 64-bit tnum
- * known and unknown bits. The previous smin/smax bounds are
- * invalid here because of jmp32 compare so mark them unknown
- * so they do not impact tnum bounds calculation.
- */
- __mark_reg64_unbounded(reg);
- }
- reg_bounds_sync(reg);
-}
-
-static bool __reg64_bound_s32(s64 a)
-{
- return a >= S32_MIN && a <= S32_MAX;
-}
-
-static bool __reg64_bound_u32(u64 a)
-{
- return a >= U32_MIN && a <= U32_MAX;
-}
-
-static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
-{
- __mark_reg32_unbounded(reg);
- if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
- reg->s32_min_value = (s32)reg->smin_value;
- reg->s32_max_value = (s32)reg->smax_value;
- }
- if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
- reg->u32_min_value = (u32)reg->umin_value;
- reg->u32_max_value = (u32)reg->umax_value;
- }
- reg_bounds_sync(reg);
-}
-
/* Mark a register as having a completely unknown (scalar) value. */
static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg)
@@ -2597,6 +2350,11 @@ static void init_reg_state(struct bpf_verifier_env *env,
regs[BPF_REG_FP].frameno = state->frameno;
}
+static struct bpf_retval_range retval_range(s32 minval, s32 maxval)
+{
+ return (struct bpf_retval_range){ minval, maxval };
+}
+
#define BPF_MAIN_FUNC (-1)
static void init_func_state(struct bpf_verifier_env *env,
struct bpf_func_state *state,
@@ -2605,7 +2363,7 @@ static void init_func_state(struct bpf_verifier_env *env,
state->callsite = callsite;
state->frameno = frameno;
state->subprogno = subprogno;
- state->callback_ret_range = tnum_range(0, 0);
+ state->callback_ret_range = retval_range(0, 0);
init_reg_state(env, state);
mark_verifier_state_scratched(env);
}
@@ -3149,6 +2907,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
if (env->subprog_info[i].start != ex_cb_insn)
continue;
env->exception_callback_subprog = i;
+ mark_subprog_exc_cb(env, i);
break;
}
}
@@ -3505,6 +3264,21 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return __check_reg_arg(env, state->regs, regno, t);
}
+static int insn_stack_access_flags(int frameno, int spi)
+{
+ return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
+}
+
+static int insn_stack_access_spi(int insn_flags)
+{
+ return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
+}
+
+static int insn_stack_access_frameno(int insn_flags)
+{
+ return insn_flags & INSN_F_FRAMENO_MASK;
+}
+
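/* Standalone sketch of the flag packing used by the helpers above. The TOY_F_*
 * values below are assumptions made for this illustration (the real INSN_F_*
 * definitions live in include/linux/bpf_verifier.h): a few bits of frame
 * number, a few bits of spill slot index, plus one "this was a stack access"
 * bit.
 */
#include <assert.h>

#define TOY_F_FRAMENO_MASK	0x7		/* bits 0-2 (assumed width) */
#define TOY_F_SPI_SHIFT		3
#define TOY_F_SPI_MASK		0x3f		/* bits 3-8 (assumed width) */
#define TOY_F_STACK_ACCESS	(1 << 9)	/* bit 9 */

static int toy_pack(int frameno, int spi)
{
	return TOY_F_STACK_ACCESS | (spi << TOY_F_SPI_SHIFT) | frameno;
}

int main(void)
{
	int flags = toy_pack(/*frameno=*/2, /*spi=*/17);

	assert(flags & TOY_F_STACK_ACCESS);
	assert(((flags >> TOY_F_SPI_SHIFT) & TOY_F_SPI_MASK) == 17);
	assert((flags & TOY_F_FRAMENO_MASK) == 2);
	return 0;
}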
static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
{
env->insn_aux_data[idx].jmp_point = true;
@@ -3516,28 +3290,51 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
}
/* for any branch, call, exit record the history of jmps in the given state */
-static int push_jmp_history(struct bpf_verifier_env *env,
- struct bpf_verifier_state *cur)
+static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
+ int insn_flags)
{
u32 cnt = cur->jmp_history_cnt;
- struct bpf_idx_pair *p;
+ struct bpf_jmp_history_entry *p;
size_t alloc_size;
- if (!is_jmp_point(env, env->insn_idx))
+ /* combine instruction flags if we already recorded this instruction */
+ if (env->cur_hist_ent) {
+ /* atomic instructions push insn_flags twice, for READ and
+ * WRITE sides, but they should agree on stack slot
+ */
+ WARN_ONCE((env->cur_hist_ent->flags & insn_flags) &&
+ (env->cur_hist_ent->flags & insn_flags) != insn_flags,
+ "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
+ env->insn_idx, env->cur_hist_ent->flags, insn_flags);
+ env->cur_hist_ent->flags |= insn_flags;
return 0;
+ }
cnt++;
alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
if (!p)
return -ENOMEM;
- p[cnt - 1].idx = env->insn_idx;
- p[cnt - 1].prev_idx = env->prev_insn_idx;
cur->jmp_history = p;
+
+ p = &cur->jmp_history[cnt - 1];
+ p->idx = env->insn_idx;
+ p->prev_idx = env->prev_insn_idx;
+ p->flags = insn_flags;
cur->jmp_history_cnt = cnt;
+ env->cur_hist_ent = p;
+
return 0;
}
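/* Toy model (illustration only) of the merging behaviour above: if the same
 * instruction pushes history twice while being processed -- as atomics do for
 * their READ and WRITE halves -- the flags are OR-ed into the existing entry
 * instead of growing the history array.
 */
#include <stdio.h>

struct toy_hist_ent { int idx; int flags; };

static int toy_push(struct toy_hist_ent *hist, int cnt, int insn_idx, int flags)
{
	if (cnt > 0 && hist[cnt - 1].idx == insn_idx) {
		hist[cnt - 1].flags |= flags;	/* same insn: merge flags */
		return cnt;
	}
	hist[cnt].idx = insn_idx;
	hist[cnt].flags = flags;
	return cnt + 1;
}

int main(void)
{
	struct toy_hist_ent hist[8];
	int cnt = 0;

	cnt = toy_push(hist, cnt, 10, 0x1);	/* READ side of an atomic op       */
	cnt = toy_push(hist, cnt, 10, 0x1);	/* WRITE side, same slot: merged   */
	cnt = toy_push(hist, cnt, 11, 0x2);	/* next instruction: new entry     */
	printf("%d entries, insn 10 flags %#x\n", cnt, hist[0].flags);
	return 0;
}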
+static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
+ u32 hist_end, int insn_idx)
+{
+ if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
+ return &st->jmp_history[hist_end - 1];
+ return NULL;
+}
+
/* Backtrack one insn at a time. If idx is not at the top of recorded
* history then previous instruction came from straight line execution.
* Return -ENOENT if we exhausted all instructions within given state.
@@ -3664,16 +3461,6 @@ static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u3
bt->stack_masks[frame] &= ~(1ull << slot);
}
-static inline void bt_set_slot(struct backtrack_state *bt, u32 slot)
-{
- bt_set_frame_slot(bt, bt->frame, slot);
-}
-
-static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot)
-{
- bt_clear_frame_slot(bt, bt->frame, slot);
-}
-
static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
{
return bt->reg_masks[frame];
@@ -3699,9 +3486,9 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
return bt->reg_masks[bt->frame] & (1 << reg);
}
-static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
+static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
{
- return bt->stack_masks[bt->frame] & (1ull << slot);
+ return bt->stack_masks[frame] & (1ull << slot);
}
/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
@@ -3755,7 +3542,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
* - *was* processed previously during backtracking.
*/
static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
- struct backtrack_state *bt)
+ struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
{
const struct bpf_insn_cbs cbs = {
.cb_call = disasm_kfunc_name,
@@ -3768,7 +3555,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
u8 mode = BPF_MODE(insn->code);
u32 dreg = insn->dst_reg;
u32 sreg = insn->src_reg;
- u32 spi, i;
+ u32 spi, i, fr;
if (insn->code == 0)
return 0;
@@ -3829,20 +3616,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* by 'precise' mark in corresponding register of this state.
* No further tracking necessary.
*/
- if (insn->src_reg != BPF_REG_FP)
+ if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
return 0;
-
/* dreg = *(u64 *)[fp - off] was a fill from the stack.
* that [fp - off] slot contains scalar that needs to be
* tracked with precision
*/
- spi = (-insn->off - 1) / BPF_REG_SIZE;
- if (spi >= 64) {
- verbose(env, "BUG spi %d\n", spi);
- WARN_ONCE(1, "verifier backtracking bug");
- return -EFAULT;
- }
- bt_set_slot(bt, spi);
+ spi = insn_stack_access_spi(hist->flags);
+ fr = insn_stack_access_frameno(hist->flags);
+ bt_set_frame_slot(bt, fr, spi);
} else if (class == BPF_STX || class == BPF_ST) {
if (bt_is_reg_set(bt, dreg))
/* stx & st shouldn't be using _scalar_ dst_reg
@@ -3851,17 +3633,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
*/
return -ENOTSUPP;
/* scalars can only be spilled into stack */
- if (insn->dst_reg != BPF_REG_FP)
+ if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
return 0;
- spi = (-insn->off - 1) / BPF_REG_SIZE;
- if (spi >= 64) {
- verbose(env, "BUG spi %d\n", spi);
- WARN_ONCE(1, "verifier backtracking bug");
- return -EFAULT;
- }
- if (!bt_is_slot_set(bt, spi))
+ spi = insn_stack_access_spi(hist->flags);
+ fr = insn_stack_access_frameno(hist->flags);
+ if (!bt_is_frame_slot_set(bt, fr, spi))
return 0;
- bt_clear_slot(bt, spi);
+ bt_clear_frame_slot(bt, fr, spi);
if (class == BPF_STX)
bt_set_reg(bt, sreg);
} else if (class == BPF_JMP || class == BPF_JMP32) {
@@ -3905,10 +3683,14 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
- /* we don't track register spills perfectly,
- * so fallback to force-precise instead of failing */
- if (bt_stack_mask(bt) != 0)
- return -ENOTSUPP;
+ /* we are now tracking register spills correctly,
+ * so any instance of leftover slots is a bug
+ */
+ if (bt_stack_mask(bt) != 0) {
+ verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)");
+ return -EFAULT;
+ }
/* propagate r1-r5 to the caller */
for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
if (bt_is_reg_set(bt, i)) {
@@ -3933,8 +3715,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
WARN_ONCE(1, "verifier backtracking bug");
return -EFAULT;
}
- if (bt_stack_mask(bt) != 0)
- return -ENOTSUPP;
+ if (bt_stack_mask(bt) != 0) {
+ verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
+ WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)");
+ return -EFAULT;
+ }
/* clear r1-r5 in callback subprog's mask */
for (i = BPF_REG_1; i <= BPF_REG_5; i++)
bt_clear_reg(bt, i);
@@ -4371,6 +4156,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
for (;;) {
DECLARE_BITMAP(mask, 64);
u32 history = st->jmp_history_cnt;
+ struct bpf_jmp_history_entry *hist;
if (env->log.level & BPF_LOG_LEVEL2) {
verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
@@ -4434,7 +4220,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
err = 0;
skip_first = false;
} else {
- err = backtrack_insn(env, i, subseq_idx, bt);
+ hist = get_jmp_hist_entry(st, history, i);
+ err = backtrack_insn(env, i, subseq_idx, hist, bt);
}
if (err == -ENOTSUPP) {
mark_all_scalars_precise(env, env->cur_state);
@@ -4487,22 +4274,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
for_each_set_bit(i, mask, 64) {
if (i >= func->allocated_stack / BPF_REG_SIZE) {
- /* the sequence of instructions:
- * 2: (bf) r3 = r10
- * 3: (7b) *(u64 *)(r3 -8) = r0
- * 4: (79) r4 = *(u64 *)(r10 -8)
- * doesn't contain jmps. It's backtracked
- * as a single block.
- * During backtracking insn 3 is not recognized as
- * stack access, so at the end of backtracking
- * stack slot fp-8 is still marked in stack_mask.
- * However the parent state may not have accessed
- * fp-8 and it's "unallocated" stack space.
- * In such case fallback to conservative.
- */
- mark_all_scalars_precise(env, env->cur_state);
- bt_reset(bt);
- return 0;
+ verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n",
+ i, func->allocated_stack / BPF_REG_SIZE);
+ WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)");
+ return -EFAULT;
}
if (!is_spilled_scalar_reg(&func->stack[i])) {
@@ -4592,9 +4367,17 @@ static bool register_is_null(struct bpf_reg_state *reg)
return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
}
-static bool register_is_const(struct bpf_reg_state *reg)
+/* check if register is a constant scalar value */
+static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32)
{
- return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
+ return reg->type == SCALAR_VALUE &&
+ tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off);
+}
+
+/* assuming is_reg_const() is true, return constant value of a register */
+static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32)
+{
+ return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value;
}
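/* Sketch of what "constant" means here, using a toy tnum: a register is a known
 * constant iff the tnum mask is zero, and for subreg32 only the low 32 bits are
 * consulted. Toy types only; the kernel's tnum lives in kernel/bpf/tnum.c.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_tnum { uint64_t value, mask; };	/* mask bit set => bit unknown */

static struct toy_tnum toy_subreg(struct toy_tnum t)
{
	return (struct toy_tnum){ t.value & 0xffffffffULL, t.mask & 0xffffffffULL };
}

static bool toy_is_const(struct toy_tnum t, bool subreg32)
{
	if (subreg32)
		t = toy_subreg(t);
	return t.mask == 0;
}

static uint64_t toy_const_value(struct toy_tnum t, bool subreg32)
{
	return subreg32 ? toy_subreg(t).value : t.value;
}
/* e.g. {value=0x12345678, mask=0xff00000000} is not a 64-bit constant (upper
 * bits unknown) but IS a 32-bit constant equal to 0x12345678.
 */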
static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
@@ -4631,7 +4414,8 @@ static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_
dst->live = live;
}
-static void save_register_state(struct bpf_func_state *state,
+static void save_register_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg,
int size)
{
@@ -4646,7 +4430,7 @@ static void save_register_state(struct bpf_func_state *state,
/* size < 8 bytes spill */
for (; i; i--)
- scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
+ mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]);
}
static bool is_bpf_st_mem(struct bpf_insn *insn)
@@ -4667,16 +4451,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
struct bpf_reg_state *reg = NULL;
- u32 dst_reg = insn->dst_reg;
+ int insn_flags = insn_stack_access_flags(state->frameno, spi);
- err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
- if (err)
- return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
if (!env->allow_ptr_leaks &&
- state->stack[spi].slot_type[0] == STACK_SPILL &&
+ is_spilled_reg(&state->stack[spi]) &&
size != BPF_REG_SIZE) {
verbose(env, "attempt to corrupt spilled pointer on stack\n");
return -EACCES;
@@ -4706,20 +4487,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err;
mark_stack_slot_scratched(env, spi);
- if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
- !register_is_null(reg) && env->bpf_capable) {
- if (dst_reg != BPF_REG_FP) {
- /* The backtracking logic can only recognize explicit
- * stack slot address like [fp - 8]. Other spill of
- * scalar via different register has to be conservative.
- * Backtrack from here and mark all registers as precise
- * that contributed into 'reg' being a constant.
- */
- err = mark_chain_precision(env, value_regno);
- if (err)
- return err;
- }
- save_register_state(state, spi, reg, size);
+ if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && env->bpf_capable) {
+ save_register_state(env, state, spi, reg, size);
/* Break the relation on a narrowing spill. */
if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
state->stack[spi].spilled_ptr.id = 0;
@@ -4729,7 +4498,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
__mark_reg_known(&fake_reg, insn->imm);
fake_reg.type = SCALAR_VALUE;
- save_register_state(state, spi, &fake_reg, size);
+ save_register_state(env, state, spi, &fake_reg, size);
} else if (reg && is_spillable_regtype(reg->type)) {
/* register containing pointer is being spilled into stack */
if (size != BPF_REG_SIZE) {
@@ -4741,7 +4510,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}
- save_register_state(state, spi, reg, size);
+ save_register_state(env, state, spi, reg, size);
} else {
u8 type = STACK_MISC;
@@ -4766,7 +4535,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
/* when we zero initialize stack slots mark them as such */
if ((reg && register_is_null(reg)) ||
(!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
- /* backtracking doesn't work for STACK_ZERO yet. */
+ /* STACK_ZERO case happened because register spill
+ * wasn't properly aligned at the stack slot boundary,
+ * so it's not a register spill anymore; force
+ * originating register to be precise to make
+ * STACK_ZERO correct for subsequent states
+ */
err = mark_chain_precision(env, value_regno);
if (err)
return err;
@@ -4775,9 +4549,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
/* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
- state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
- type;
+ state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
+ insn_flags = 0; /* not a register spill */
}
+
+ if (insn_flags)
+ return push_jmp_history(env, env->cur_state, insn_flags);
return 0;
}
@@ -4827,10 +4604,6 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
(!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
writing_zero = true;
- err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
- if (err)
- return err;
-
for (i = min_off; i < max_off; i++) {
int spi;
@@ -4929,21 +4702,10 @@ static void mark_reg_stack_read(struct bpf_verifier_env *env,
zeros++;
}
if (zeros == max_off - min_off) {
- /* any access_size read into register is zero extended,
- * so the whole register == const_zero
+ /* Any access_size read into register is zero extended,
+ * so the whole register == const_zero.
*/
- __mark_reg_const_zero(&state->regs[dst_regno]);
- /* backtracking doesn't support STACK_ZERO yet,
- * so mark it precise here, so that later
- * backtracking can stop here.
- * Backtracking may not need this if this register
- * doesn't participate in pointer adjustment.
- * Forward propagation of precise flag is not
- * necessary either. This mark is only to stop
- * backtracking. Any register that contributed
- * to const 0 was marked precise before spill.
- */
- state->regs[dst_regno].precise = true;
+ __mark_reg_const_zero(env, &state->regs[dst_regno]);
} else {
/* have read misc data from the stack */
mark_reg_unknown(env, state->regs, dst_regno);
@@ -4970,6 +4732,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
struct bpf_reg_state *reg;
u8 *stype, type;
+ int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
stype = reg_state->stack[spi].slot_type;
reg = &reg_state->stack[spi].spilled_ptr;
@@ -5002,25 +4765,42 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;
} else {
+ int spill_cnt = 0, zero_cnt = 0;
+
for (i = 0; i < size; i++) {
type = stype[(slot - i) % BPF_REG_SIZE];
- if (type == STACK_SPILL)
+ if (type == STACK_SPILL) {
+ spill_cnt++;
continue;
+ }
if (type == STACK_MISC)
continue;
+ if (type == STACK_ZERO) {
+ zero_cnt++;
+ continue;
+ }
if (type == STACK_INVALID && env->allow_uninit_stack)
continue;
verbose(env, "invalid read from stack off %d+%d size %d\n",
off, i, size);
return -EACCES;
}
- mark_reg_unknown(env, state->regs, dst_regno);
+
+ if (spill_cnt == size &&
+ tnum_is_const(reg->var_off) && reg->var_off.value == 0) {
+ __mark_reg_const_zero(env, &state->regs[dst_regno]);
+ /* this IS register fill, so keep insn_flags */
+ } else if (zero_cnt == size) {
+ /* similarly to mark_reg_stack_read(), preserve zeroes */
+ __mark_reg_const_zero(env, &state->regs[dst_regno]);
+ insn_flags = 0; /* not restoring original register state */
+ } else {
+ mark_reg_unknown(env, state->regs, dst_regno);
+ insn_flags = 0; /* not restoring original register state */
+ }
}
state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
- return 0;
- }
-
- if (dst_regno >= 0) {
+ } else if (dst_regno >= 0) {
/* restore register state from stack */
copy_register_state(&state->regs[dst_regno], reg);
/* mark reg as written since spilled pointer state likely
@@ -5056,7 +4836,10 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
if (dst_regno >= 0)
mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
+ insn_flags = 0; /* we are not restoring spilled register */
}
+ if (insn_flags)
+ return push_jmp_history(env, env->cur_state, insn_flags);
return 0;
}
@@ -5346,8 +5129,8 @@ static int __check_ptr_off_reg(struct bpf_verifier_env *env,
return 0;
}
-int check_ptr_off_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno)
+static int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno)
{
return __check_ptr_off_reg(env, reg, regno, false);
}
@@ -5451,10 +5234,23 @@ BTF_SET_END(rcu_protected_types)
static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
{
if (!btf_is_kernel(btf))
- return false;
+ return true;
return btf_id_set_contains(&rcu_protected_types, btf_id);
}
+static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field)
+{
+ struct btf_struct_meta *meta;
+
+ if (btf_is_kernel(kptr_field->kptr.btf))
+ return NULL;
+
+ meta = btf_find_struct_meta(kptr_field->kptr.btf,
+ kptr_field->kptr.btf_id);
+
+ return meta ? meta->record : NULL;
+}
+
static bool rcu_safe_kptr(const struct btf_field *field)
{
const struct btf_field_kptr *kptr = &field->kptr;
@@ -5465,12 +5261,25 @@ static bool rcu_safe_kptr(const struct btf_field *field)
static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field)
{
+ struct btf_record *rec;
+ u32 ret;
+
+ ret = PTR_MAYBE_NULL;
if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) {
- if (kptr_field->type != BPF_KPTR_PERCPU)
- return PTR_MAYBE_NULL | MEM_RCU;
- return PTR_MAYBE_NULL | MEM_RCU | MEM_PERCPU;
+ ret |= MEM_RCU;
+ if (kptr_field->type == BPF_KPTR_PERCPU)
+ ret |= MEM_PERCPU;
+ else if (!btf_is_kernel(kptr_field->kptr.btf))
+ ret |= MEM_ALLOC;
+
+ rec = kptr_pointee_btf_record(kptr_field);
+ if (rec && btf_record_has_field(rec, BPF_GRAPH_NODE))
+ ret |= NON_OWN_REF;
+ } else {
+ ret |= PTR_UNTRUSTED;
}
- return PTR_MAYBE_NULL | PTR_UNTRUSTED;
+
+ return ret;
}
static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
@@ -5959,20 +5768,6 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
strict);
}
-static int update_stack_depth(struct bpf_verifier_env *env,
- const struct bpf_func_state *func,
- int off)
-{
- u16 stack = env->subprog_info[func->subprogno].stack_depth;
-
- if (stack >= -off)
- return 0;
-
- /* update known max for given subprogram */
- env->subprog_info[func->subprogno].stack_depth = -off;
- return 0;
-}
-
/* starting from main bpf function walk all instructions of the function
* and recursively walk all callees that given function can call.
* Ignore jump and exit insns.
@@ -6244,9 +6039,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* values are also truncated so we push 64-bit bounds into
* 32-bit bounds. Above were truncated < 32-bits already.
*/
- if (size >= 4)
- return;
- __reg_combine_64_into_32(reg);
+ if (size < 4) {
+ __mark_reg32_unbounded(reg);
+ reg_bounds_sync(reg);
+ }
}
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
@@ -6761,13 +6557,14 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
* The minimum valid offset is -MAX_BPF_STACK for writes, and
* -state->allocated_stack for reads.
*/
-static int check_stack_slot_within_bounds(int off,
- struct bpf_func_state *state,
- enum bpf_access_type t)
+static int check_stack_slot_within_bounds(struct bpf_verifier_env *env,
+ s64 off,
+ struct bpf_func_state *state,
+ enum bpf_access_type t)
{
int min_valid_off;
- if (t == BPF_WRITE)
+ if (t == BPF_WRITE || env->allow_uninit_stack)
min_valid_off = -MAX_BPF_STACK;
else
min_valid_off = -state->allocated_stack;
@@ -6790,7 +6587,7 @@ static int check_stack_access_within_bounds(
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
struct bpf_func_state *state = func(env, reg);
- int min_off, max_off;
+ s64 min_off, max_off;
int err;
char *err_extra;
@@ -6803,11 +6600,8 @@ static int check_stack_access_within_bounds(
err_extra = " write to";
if (tnum_is_const(reg->var_off)) {
- min_off = reg->var_off.value + off;
- if (access_size > 0)
- max_off = min_off + access_size - 1;
- else
- max_off = min_off;
+ min_off = (s64)reg->var_off.value + off;
+ max_off = min_off + access_size;
} else {
if (reg->smax_value >= BPF_MAX_VAR_OFF ||
reg->smin_value <= -BPF_MAX_VAR_OFF) {
@@ -6816,15 +6610,12 @@ static int check_stack_access_within_bounds(
return -EACCES;
}
min_off = reg->smin_value + off;
- if (access_size > 0)
- max_off = reg->smax_value + off + access_size - 1;
- else
- max_off = min_off;
+ max_off = reg->smax_value + off + access_size;
}
- err = check_stack_slot_within_bounds(min_off, state, type);
- if (!err)
- err = check_stack_slot_within_bounds(max_off, state, type);
+ err = check_stack_slot_within_bounds(env, min_off, state, type);
+ if (!err && max_off > 0)
+ err = -EINVAL; /* out of stack access into non-negative offsets */
if (err) {
if (tnum_is_const(reg->var_off)) {
@@ -6834,11 +6625,16 @@ static int check_stack_access_within_bounds(
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
- err_extra, regno, tn_buf, access_size);
+ verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n",
+ err_extra, regno, tn_buf, off, access_size);
}
+ return err;
}
- return err;
+
+ /* Note that there is no stack access with offset zero, so the needed stack
+ * size is -min_off, not -min_off+1.
+ */
+ return grow_stack_state(env, state, -min_off /* size */);
}
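/* Worked example of the bounds arithmetic above (standalone sketch). max_off is
 * now exclusive (min_off + access_size), so a write at fp-16 of 8 bytes gives
 * [min_off, max_off) = [-16, -8), which is fine and needs 16 bytes of stack; a
 * write at fp-4 of 8 bytes gives [-4, +4), and max_off > 0 spills past the
 * frame pointer, so it is rejected.
 */
#include <stdio.h>

static int toy_check(long long off, int access_size, int max_bpf_stack)
{
	long long min_off = off;			/* constant-offset case */
	long long max_off = min_off + access_size;	/* exclusive upper bound */

	if (min_off < -max_bpf_stack)
		return -1;				/* below the stack */
	if (max_off > 0)
		return -1;				/* past the frame pointer */
	return (int)-min_off;				/* stack depth needed */
}

int main(void)
{
	printf("%d\n", toy_check(-16, 8, 512));		/* 16 */
	printf("%d\n", toy_check(-4, 8, 512));		/* -1 */
	return 0;
}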
/* check whether memory at (regno + off) is accessible for t = (read | write)
@@ -6853,7 +6649,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
{
struct bpf_reg_state *regs = cur_regs(env);
struct bpf_reg_state *reg = regs + regno;
- struct bpf_func_state *state;
int size, err = 0;
size = bpf_size_to_bytes(bpf_size);
@@ -6996,11 +6791,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
if (err)
return err;
- state = func(env, reg);
- err = update_stack_depth(env, state, off);
- if (err)
- return err;
-
if (t == BPF_READ)
err = check_stack_read(env, regno, off, size,
value_regno);
@@ -7189,13 +6979,13 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
if (err)
return err;
-
return 0;
}
/* When register 'regno' is used to read the stack (either directly or through
* a helper function) make sure that it's within stack boundary and, depending
- * on the access type, that all elements of the stack are initialized.
+ * on the access type and privileges, that all elements of the stack are
+ * initialized.
*
* 'off' includes 'regno->off', but not its dynamic part (if any).
*
@@ -7303,8 +7093,11 @@ static int check_stack_range_initialized(
slot = -i - 1;
spi = slot / BPF_REG_SIZE;
- if (state->allocated_stack <= slot)
- goto err;
+ if (state->allocated_stack <= slot) {
+			verbose(env, "verifier bug: allocated_stack too small\n");
+ return -EFAULT;
+ }
+
stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
if (*stype == STACK_MISC)
goto mark;
@@ -7328,7 +7121,6 @@ static int check_stack_range_initialized(
goto mark;
}
-err:
if (tnum_is_const(reg->var_off)) {
verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
err_extra, regno, min_off, i - min_off, access_size);
@@ -7353,7 +7145,7 @@ mark:
* helper may write to the entire memory range.
*/
}
- return update_stack_depth(env, state, min_off);
+ return 0;
}
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
@@ -7449,6 +7241,12 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
}
}
+/* verify arguments to helpers or kfuncs consisting of a pointer and an access
+ * size.
+ *
+ * @regno is the register containing the access size. regno-1 is the register
+ * containing the pointer.
+ */
static int check_mem_size_reg(struct bpf_verifier_env *env,
struct bpf_reg_state *reg, u32 regno,
bool zero_size_allowed,
@@ -7483,12 +7281,10 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
return -EACCES;
}
- if (reg->umin_value == 0) {
- err = check_helper_mem_access(env, regno - 1, 0,
- zero_size_allowed,
- meta);
- if (err)
- return err;
+ if (reg->umin_value == 0 && !zero_size_allowed) {
+ verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
+ regno, reg->umin_value, reg->umax_value);
+ return -EACCES;
}
if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
@@ -7504,8 +7300,8 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
return err;
}
-int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
- u32 regno, u32 mem_size)
+static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ u32 regno, u32 mem_size)
{
bool may_be_null = type_may_be_null(reg->type);
struct bpf_reg_state saved_reg;
@@ -8150,7 +7946,7 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
/* switch to DRAINED state, but keep the depth unchanged */
/* mark current iter state as drained and assume returned NULL */
cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
- __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]);
+ __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]);
return 0;
}
@@ -8490,9 +8286,9 @@ reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
return field;
}
-int check_func_arg_reg_off(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno,
- enum bpf_arg_type arg_type)
+static int check_func_arg_reg_off(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno,
+ enum bpf_arg_type arg_type)
{
u32 type = reg->type;
@@ -8626,6 +8422,54 @@ static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
return state->stack[spi].spilled_ptr.dynptr.type;
}
+static int check_reg_const_str(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno)
+{
+ struct bpf_map *map = reg->map_ptr;
+ int err;
+ int map_off;
+ u64 map_addr;
+ char *str_ptr;
+
+ if (reg->type != PTR_TO_MAP_VALUE)
+ return -EINVAL;
+
+ if (!bpf_map_is_rdonly(map)) {
+ verbose(env, "R%d does not point to a readonly map'\n", regno);
+ return -EACCES;
+ }
+
+ if (!tnum_is_const(reg->var_off)) {
+ verbose(env, "R%d is not a constant address'\n", regno);
+ return -EACCES;
+ }
+
+ if (!map->ops->map_direct_value_addr) {
+ verbose(env, "no direct value access support for this map type\n");
+ return -EACCES;
+ }
+
+ err = check_map_access(env, regno, reg->off,
+ map->value_size - reg->off, false,
+ ACCESS_HELPER);
+ if (err)
+ return err;
+
+ map_off = reg->off + reg->var_off.value;
+ err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
+ if (err) {
+ verbose(env, "direct value access on string failed\n");
+ return err;
+ }
+
+ str_ptr = (char *)(long)(map_addr);
+ if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
+ verbose(env, "string is not zero-terminated\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
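/* BPF-program-side sketch of what the check above accepts: the string must sit
 * at a constant offset in a read-only map and be NUL-terminated. With
 * clang/libbpf, a static const array lands in the .rodata map, which satisfies
 * bpf_map_is_rdonly(). bpf_snprintf() is one helper whose format argument is
 * validated this way; the section name and program body are illustrative only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

static const char fmt[] = "pid=%d uid=%d";	/* placed in .rodata (read-only map) */

char out[64];

SEC("tracepoint/syscalls/sys_enter_write")
int log_write(void *ctx)
{
	__u64 args[2] = { bpf_get_current_pid_tgid() >> 32,
			  bpf_get_current_uid_gid() & 0xffffffff };

	/* fmt is a constant address into a read-only map: accepted. A
	 * runtime-computed pointer or a writable buffer would be rejected
	 * with the errors printed by check_reg_const_str().
	 */
	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
	return 0;
}

char _license[] SEC("license") = "GPL";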
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn,
@@ -8870,44 +8714,9 @@ skip_type_check:
}
case ARG_PTR_TO_CONST_STR:
{
- struct bpf_map *map = reg->map_ptr;
- int map_off;
- u64 map_addr;
- char *str_ptr;
-
- if (!bpf_map_is_rdonly(map)) {
- verbose(env, "R%d does not point to a readonly map'\n", regno);
- return -EACCES;
- }
-
- if (!tnum_is_const(reg->var_off)) {
- verbose(env, "R%d is not a constant address'\n", regno);
- return -EACCES;
- }
-
- if (!map->ops->map_direct_value_addr) {
- verbose(env, "no direct value access support for this map type\n");
- return -EACCES;
- }
-
- err = check_map_access(env, regno, reg->off,
- map->value_size - reg->off, false,
- ACCESS_HELPER);
+ err = check_reg_const_str(env, reg, regno);
if (err)
return err;
-
- map_off = reg->off + reg->var_off.value;
- err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
- if (err) {
- verbose(env, "direct value access on string failed\n");
- return err;
- }
-
- str_ptr = (char *)(long)(map_addr);
- if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
- verbose(env, "string is not zero-terminated\n");
- return -EINVAL;
- }
break;
}
case ARG_PTR_TO_KPTR:
@@ -9440,6 +9249,102 @@ err_out:
return err;
}
+static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ const struct btf *btf,
+ struct bpf_reg_state *regs)
+{
+ struct bpf_subprog_info *sub = subprog_info(env, subprog);
+ struct bpf_verifier_log *log = &env->log;
+ u32 i;
+ int ret;
+
+ ret = btf_prepare_func_args(env, subprog);
+ if (ret)
+ return ret;
+
+ /* check that BTF function arguments match actual types that the
+ * verifier sees.
+ */
+ for (i = 0; i < sub->arg_cnt; i++) {
+ u32 regno = i + 1;
+ struct bpf_reg_state *reg = &regs[regno];
+ struct bpf_subprog_arg_info *arg = &sub->args[i];
+
+ if (arg->arg_type == ARG_ANYTHING) {
+ if (reg->type != SCALAR_VALUE) {
+ bpf_log(log, "R%d is not a scalar\n", regno);
+ return -EINVAL;
+ }
+ } else if (arg->arg_type == ARG_PTR_TO_CTX) {
+ ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
+ if (ret < 0)
+ return ret;
+ /* If function expects ctx type in BTF check that caller
+ * is passing PTR_TO_CTX.
+ */
+ if (reg->type != PTR_TO_CTX) {
+ bpf_log(log, "arg#%d expects pointer to ctx\n", i);
+ return -EINVAL;
+ }
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
+ ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE);
+ if (ret < 0)
+ return ret;
+ if (check_mem_reg(env, reg, regno, arg->mem_size))
+ return -EINVAL;
+ if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) {
+ bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
+ return -EINVAL;
+ }
+ } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
+ ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0);
+ if (ret)
+ return ret;
+ } else {
+ bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n",
+ i, arg->arg_type);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/* Compare BTF of a function call with given bpf_reg_state.
+ * Returns:
+ * EFAULT - there is a verifier bug. Abort verification.
+ * EINVAL - there is a type mismatch or BTF is not available.
+ * 0 - BTF matches with what bpf_reg_state expects.
+ * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
+ */
+static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs)
+{
+ struct bpf_prog *prog = env->prog;
+ struct btf *btf = prog->aux->btf;
+ u32 btf_id;
+ int err;
+
+ if (!prog->aux->func_info)
+ return -EINVAL;
+
+ btf_id = prog->aux->func_info[subprog].type_id;
+ if (!btf_id)
+ return -EFAULT;
+
+ if (prog->aux->func_info_aux[subprog].unreliable)
+ return -EINVAL;
+
+ err = btf_check_func_arg_match(env, subprog, btf, regs);
+ /* Compiler optimizations can remove arguments from static functions
+ * or mismatched type can be passed into a global function.
+ * In such cases mark the function as unreliable from BTF point of view.
+ */
+ if (err)
+ prog->aux->func_info_aux[subprog].unreliable = true;
+ return err;
+}
+
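/* BPF-C illustration (not part of the patch, names made up) of the argument
 * classes the matcher above recognizes for a global subprog: a PTR_TO_CTX
 * argument and a plain scalar. Non-static functions are verified separately,
 * and each caller is checked against the function's BTF signature.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* global subprog: ctx pointer + scalar, both understood from BTF */
__noinline int clamp_len(struct xdp_md *ctx, int len)
{
	if (len < 0)
		return 0;
	return len > 1500 ? 1500 : len;
}

SEC("xdp")
int main_prog(struct xdp_md *ctx)
{
	/* at this call the verifier checks that R1 is PTR_TO_CTX and R2 a scalar */
	return clamp_len(ctx, 64) ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";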
static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int insn_idx, int subprog,
set_callee_state_fn set_callee_state_cb)
@@ -9529,13 +9434,18 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (err == -EFAULT)
return err;
if (subprog_is_global(env, subprog)) {
+ const char *sub_name = subprog_name(env, subprog);
+
if (err) {
- verbose(env, "Caller passes invalid args into func#%d\n", subprog);
+ verbose(env, "Caller passes invalid args into func#%d ('%s')\n",
+ subprog, sub_name);
return err;
}
- if (env->log.level & BPF_LOG_LEVEL)
- verbose(env, "Func#%d is global and valid. Skipping.\n", subprog);
+ verbose(env, "Func#%d ('%s') is global and assumed valid.\n",
+ subprog, sub_name);
+ /* mark global subprog for verifying after main prog */
+ subprog_aux(env, subprog)->called = true;
clear_caller_saved_regs(env, caller->regs);
/* All global functions return a 64-bit SCALAR_VALUE */
@@ -9635,7 +9545,7 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env,
return err;
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9657,7 +9567,7 @@ static int set_loop_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9687,7 +9597,7 @@ static int set_timer_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_async_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9706,7 +9616,7 @@ static int set_find_vma_callback_state(struct bpf_verifier_env *env,
callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
callee->regs[BPF_REG_2].btf = btf_vmlinux;
- callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA],
+ callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
/* pointer to stack or null */
callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
@@ -9715,7 +9625,7 @@ static int set_find_vma_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9738,7 +9648,7 @@ static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9770,7 +9680,7 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
- callee->callback_ret_range = tnum_range(0, 1);
+ callee->callback_ret_range = retval_range(0, 1);
return 0;
}
@@ -9799,6 +9709,11 @@ static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}
+static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg)
+{
+ return range.minval <= reg->smin_value && reg->smax_value <= range.maxval;
+}
+
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state, *prev_st;
@@ -9822,15 +9737,21 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
caller = state->frame[state->curframe - 1];
if (callee->in_callback_fn) {
- /* enforce R0 return value range [0, 1]. */
- struct tnum range = callee->callback_ret_range;
-
if (r0->type != SCALAR_VALUE) {
verbose(env, "R0 not a scalar value\n");
return -EACCES;
}
- if (!tnum_in(range, r0->var_off)) {
- verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
+
+ /* we are going to rely on register's precise value */
+ err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64);
+ err = err ?: mark_chain_precision(env, BPF_REG_0);
+ if (err)
+ return err;
+
+ /* enforce R0 return value range */
+ if (!retval_range_within(callee->callback_ret_range, r0)) {
+ verbose_invalid_scalar(env, r0, callee->callback_ret_range,
+ "At callback return", "R0");
return -EINVAL;
}
if (!calls_callback(env, callee->callsite)) {
@@ -9896,14 +9817,15 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
return 0;
}
-static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
- int func_id,
- struct bpf_call_arg_meta *meta)
+static int do_refine_retval_range(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs, int ret_type,
+ int func_id,
+ struct bpf_call_arg_meta *meta)
{
struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
if (ret_type != RET_INTEGER)
- return;
+ return 0;
switch (func_id) {
case BPF_FUNC_get_stack:
@@ -9929,6 +9851,8 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
reg_bounds_sync(ret_reg);
break;
}
+
+ return reg_bounds_sanity_check(env, ret_reg, "retval");
}
static int
@@ -9998,7 +9922,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
val = reg->var_off.value;
max = map->max_entries;
- if (!(register_is_const(reg) && val < max)) {
+ if (!(is_reg_const(reg, false) && val < max)) {
bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
return 0;
}
@@ -10593,7 +10517,9 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
regs[BPF_REG_0].ref_obj_id = id;
}
- do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
+ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta);
+ if (err)
+ return err;
err = check_map_func_compatibility(env, meta.map_ptr, func_id);
if (err)
@@ -10771,6 +10697,11 @@ static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param
return __kfunc_param_match_suffix(btf, arg, "__nullable");
}
+static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg)
+{
+ return __kfunc_param_match_suffix(btf, arg, "__str");
+}
+
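/* Hypothetical kernel-side kfunc declaration (illustration only) showing the
 * naming convention matched above: just like "__nullable" a few lines up, the
 * "__str" suffix on a parameter name marks it as a constant string, and the
 * argument is then vetted by check_reg_const_str() the same way
 * ARG_PTR_TO_CONST_STR is for helpers.
 */
__bpf_kfunc int bpf_example_emit(const char *msg__str, int level);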
static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
const struct btf_param *arg,
const char *name)
@@ -10914,6 +10845,7 @@ enum kfunc_ptr_arg_type {
KF_ARG_PTR_TO_RB_ROOT,
KF_ARG_PTR_TO_RB_NODE,
KF_ARG_PTR_TO_NULL,
+ KF_ARG_PTR_TO_CONST_STR,
};
enum special_kfunc_type {
@@ -11064,6 +10996,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
return KF_ARG_PTR_TO_RB_NODE;
+ if (is_kfunc_arg_const_str(meta->btf, &args[argno]))
+ return KF_ARG_PTR_TO_CONST_STR;
+
if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
if (!btf_type_is_struct(ref_t)) {
verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
@@ -11695,6 +11630,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
case KF_ARG_PTR_TO_MEM_SIZE:
case KF_ARG_PTR_TO_CALLBACK:
case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
+ case KF_ARG_PTR_TO_CONST_STR:
/* Trusted by default */
break;
default:
@@ -11966,6 +11902,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
meta->arg_btf = reg->btf;
meta->arg_btf_id = reg->btf_id;
break;
+ case KF_ARG_PTR_TO_CONST_STR:
+ if (reg->type != PTR_TO_MAP_VALUE) {
+ verbose(env, "arg#%d doesn't point to a const string\n", i);
+ return -EINVAL;
+ }
+ ret = check_reg_const_str(env, reg, regno);
+ if (ret)
+ return ret;
+ break;
}
}
@@ -12020,7 +11965,7 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env,
return 0;
}
-static int check_return_code(struct bpf_verifier_env *env, int regno);
+static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name);
static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
int *insn_idx_p)
@@ -12157,7 +12102,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
* to bpf_throw becomes the return value of the program.
*/
if (!env->exception_callback_subprog) {
- err = check_return_code(env, BPF_REG_1);
+ err = check_return_code(env, BPF_REG_1, "R1");
if (err < 0)
return err;
}
@@ -12196,20 +12141,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set)
return -ENOMEM;
- if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
- if (!bpf_global_percpu_ma_set) {
- mutex_lock(&bpf_percpu_ma_lock);
- if (!bpf_global_percpu_ma_set) {
- err = bpf_mem_alloc_init(&bpf_global_percpu_ma, 0, true);
- if (!err)
- bpf_global_percpu_ma_set = true;
- }
- mutex_unlock(&bpf_percpu_ma_lock);
- if (err)
- return err;
- }
- }
-
if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
return -EINVAL;
@@ -12230,6 +12161,35 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return -EINVAL;
}
+ if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
+ if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) {
+ verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n",
+ ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ if (!bpf_global_percpu_ma_set) {
+ mutex_lock(&bpf_percpu_ma_lock);
+ if (!bpf_global_percpu_ma_set) {
+ /* Charge memory allocated with bpf_global_percpu_ma to
+ * root memcg. The obj_cgroup for root memcg is NULL.
+ */
+ err = bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma, NULL);
+ if (!err)
+ bpf_global_percpu_ma_set = true;
+ }
+ mutex_unlock(&bpf_percpu_ma_lock);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&bpf_percpu_ma_lock);
+ err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size);
+ mutex_unlock(&bpf_percpu_ma_lock);
+ if (err)
+ return err;
+ }
+
struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id);
if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) {
if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) {
@@ -14086,13 +14046,12 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* check dest operand */
err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
+ err = err ?: adjust_reg_min_max_vals(env, insn);
if (err)
return err;
-
- return adjust_reg_min_max_vals(env, insn);
}
- return 0;
+ return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
}
static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
@@ -14174,161 +14133,130 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
}));
}
-static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
-{
- struct tnum subreg = tnum_subreg(reg->var_off);
- s32 sval = (s32)val;
-
- switch (opcode) {
- case BPF_JEQ:
- if (tnum_is_const(subreg))
- return !!tnum_equals_const(subreg, val);
- else if (val < reg->u32_min_value || val > reg->u32_max_value)
- return 0;
- else if (sval < reg->s32_min_value || sval > reg->s32_max_value)
- return 0;
- break;
- case BPF_JNE:
- if (tnum_is_const(subreg))
- return !tnum_equals_const(subreg, val);
- else if (val < reg->u32_min_value || val > reg->u32_max_value)
- return 1;
- else if (sval < reg->s32_min_value || sval > reg->s32_max_value)
- return 1;
- break;
- case BPF_JSET:
- if ((~subreg.mask & subreg.value) & val)
- return 1;
- if (!((subreg.mask | subreg.value) & val))
- return 0;
- break;
- case BPF_JGT:
- if (reg->u32_min_value > val)
- return 1;
- else if (reg->u32_max_value <= val)
- return 0;
- break;
- case BPF_JSGT:
- if (reg->s32_min_value > sval)
- return 1;
- else if (reg->s32_max_value <= sval)
- return 0;
- break;
- case BPF_JLT:
- if (reg->u32_max_value < val)
- return 1;
- else if (reg->u32_min_value >= val)
- return 0;
- break;
- case BPF_JSLT:
- if (reg->s32_max_value < sval)
- return 1;
- else if (reg->s32_min_value >= sval)
- return 0;
- break;
- case BPF_JGE:
- if (reg->u32_min_value >= val)
- return 1;
- else if (reg->u32_max_value < val)
- return 0;
- break;
- case BPF_JSGE:
- if (reg->s32_min_value >= sval)
- return 1;
- else if (reg->s32_max_value < sval)
- return 0;
- break;
- case BPF_JLE:
- if (reg->u32_max_value <= val)
- return 1;
- else if (reg->u32_min_value > val)
- return 0;
- break;
- case BPF_JSLE:
- if (reg->s32_max_value <= sval)
- return 1;
- else if (reg->s32_min_value > sval)
- return 0;
- break;
- }
-
- return -1;
-}
-
-
-static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
-{
- s64 sval = (s64)val;
+/*
+ * <reg1> <op> <reg2>; both operands are range-tracked scalars. Only the
+ * BPF_JSET case additionally requires one of them to be a constant.
+ */
+static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
+ u8 opcode, bool is_jmp32)
+{
+ struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off;
+ struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off;
+ u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value;
+ u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value;
+ s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value;
+ s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value;
+ u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value;
+ u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value;
+ s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value;
+ s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value;
switch (opcode) {
case BPF_JEQ:
- if (tnum_is_const(reg->var_off))
- return !!tnum_equals_const(reg->var_off, val);
- else if (val < reg->umin_value || val > reg->umax_value)
+ /* constants, umin/umax and smin/smax checks would be
+ * redundant in this case because they all should match
+ */
+ if (tnum_is_const(t1) && tnum_is_const(t2))
+ return t1.value == t2.value;
+ /* non-overlapping ranges */
+ if (umin1 > umax2 || umax1 < umin2)
return 0;
- else if (sval < reg->smin_value || sval > reg->smax_value)
+ if (smin1 > smax2 || smax1 < smin2)
return 0;
+ if (!is_jmp32) {
+ /* if 64-bit ranges are inconclusive, see if we can
+ * utilize 32-bit subrange knowledge to eliminate
+ * branches that can't be taken a priori
+ */
+ if (reg1->u32_min_value > reg2->u32_max_value ||
+ reg1->u32_max_value < reg2->u32_min_value)
+ return 0;
+ if (reg1->s32_min_value > reg2->s32_max_value ||
+ reg1->s32_max_value < reg2->s32_min_value)
+ return 0;
+ }
break;
case BPF_JNE:
- if (tnum_is_const(reg->var_off))
- return !tnum_equals_const(reg->var_off, val);
- else if (val < reg->umin_value || val > reg->umax_value)
+ /* constants, umin/umax and smin/smax checks would be
+ * redundant in this case because they all should match
+ */
+ if (tnum_is_const(t1) && tnum_is_const(t2))
+ return t1.value != t2.value;
+ /* non-overlapping ranges */
+ if (umin1 > umax2 || umax1 < umin2)
return 1;
- else if (sval < reg->smin_value || sval > reg->smax_value)
+ if (smin1 > smax2 || smax1 < smin2)
return 1;
+ if (!is_jmp32) {
+ /* if 64-bit ranges are inconclusive, see if we can
+ * utilize 32-bit subrange knowledge to eliminate
+ * branches that can't be taken a priori
+ */
+ if (reg1->u32_min_value > reg2->u32_max_value ||
+ reg1->u32_max_value < reg2->u32_min_value)
+ return 1;
+ if (reg1->s32_min_value > reg2->s32_max_value ||
+ reg1->s32_max_value < reg2->s32_min_value)
+ return 1;
+ }
break;
case BPF_JSET:
- if ((~reg->var_off.mask & reg->var_off.value) & val)
+ if (!is_reg_const(reg2, is_jmp32)) {
+ swap(reg1, reg2);
+ swap(t1, t2);
+ }
+ if (!is_reg_const(reg2, is_jmp32))
+ return -1;
+ if ((~t1.mask & t1.value) & t2.value)
return 1;
- if (!((reg->var_off.mask | reg->var_off.value) & val))
+ if (!((t1.mask | t1.value) & t2.value))
return 0;
break;
case BPF_JGT:
- if (reg->umin_value > val)
+ if (umin1 > umax2)
return 1;
- else if (reg->umax_value <= val)
+ else if (umax1 <= umin2)
return 0;
break;
case BPF_JSGT:
- if (reg->smin_value > sval)
+ if (smin1 > smax2)
return 1;
- else if (reg->smax_value <= sval)
+ else if (smax1 <= smin2)
return 0;
break;
case BPF_JLT:
- if (reg->umax_value < val)
+ if (umax1 < umin2)
return 1;
- else if (reg->umin_value >= val)
+ else if (umin1 >= umax2)
return 0;
break;
case BPF_JSLT:
- if (reg->smax_value < sval)
+ if (smax1 < smin2)
return 1;
- else if (reg->smin_value >= sval)
+ else if (smin1 >= smax2)
return 0;
break;
case BPF_JGE:
- if (reg->umin_value >= val)
+ if (umin1 >= umax2)
return 1;
- else if (reg->umax_value < val)
+ else if (umax1 < umin2)
return 0;
break;
case BPF_JSGE:
- if (reg->smin_value >= sval)
+ if (smin1 >= smax2)
return 1;
- else if (reg->smax_value < sval)
+ else if (smax1 < smin2)
return 0;
break;
case BPF_JLE:
- if (reg->umax_value <= val)
+ if (umax1 <= umin2)
return 1;
- else if (reg->umin_value > val)
+ else if (umin1 > umax2)
return 0;
break;
case BPF_JSLE:
- if (reg->smax_value <= sval)
+ if (smax1 <= smin2)
return 1;
- else if (reg->smin_value > sval)
+ else if (smin1 > smax2)
return 0;
break;
}
@@ -14336,41 +14264,6 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
return -1;
}
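/* Standalone illustration of the range-based branch deduction above, for the
 * unsigned BPF_JGT case only (toy code, not the kernel's struct): the branch is
 * provably taken when the smallest possible left value already exceeds the
 * largest possible right value, provably not taken when the largest left value
 * can never exceed the smallest right value, and unknown otherwise.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_range { uint64_t umin, umax; };

static int toy_jgt_taken(struct toy_range a, struct toy_range b)
{
	if (a.umin > b.umax)
		return 1;	/* always taken */
	if (a.umax <= b.umin)
		return 0;	/* never taken */
	return -1;		/* depends on the runtime value */
}

int main(void)
{
	struct toy_range r1 = { 5, 10 }, c3 = { 3, 3 }, c7 = { 7, 7 };

	printf("%d\n", toy_jgt_taken(r1, c3));	/*  1: 5 > 3 already       */
	printf("%d\n", toy_jgt_taken(r1, c7));	/* -1: could go either way */
	return 0;
}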
-/* compute branch direction of the expression "if (reg opcode val) goto target;"
- * and return:
- * 1 - branch will be taken and "goto target" will be executed
- * 0 - branch will not be taken and fall-through to next insn
- * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
- * range [0,10]
- */
-static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
- bool is_jmp32)
-{
- if (__is_pointer_value(false, reg)) {
- if (!reg_not_null(reg))
- return -1;
-
- /* If pointer is valid tests against zero will fail so we can
- * use this to direct branch taken.
- */
- if (val != 0)
- return -1;
-
- switch (opcode) {
- case BPF_JEQ:
- return 0;
- case BPF_JNE:
- return 1;
- default:
- return -1;
- }
- }
-
- if (is_jmp32)
- return is_branch32_taken(reg, val, opcode);
- return is_branch64_taken(reg, val, opcode);
-}
-
static int flip_opcode(u32 opcode)
{
/* How can we transform "a <op> b" into "b <op> a"? */
@@ -14432,216 +14325,280 @@ static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
return -1;
}
-/* Adjusts the register min/max values in the case that the dst_reg is the
- * variable register that we are working on, and src_reg is a constant or we're
- * simply doing a BPF_K check.
- * In JEQ/JNE cases we also adjust the var_off values.
+/* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;"
+ * and return:
+ * 1 - branch will be taken and "goto target" will be executed
+ * 0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value
+ * range [0,10]
*/
-static void reg_set_min_max(struct bpf_reg_state *true_reg,
- struct bpf_reg_state *false_reg,
- u64 val, u32 val32,
- u8 opcode, bool is_jmp32)
-{
- struct tnum false_32off = tnum_subreg(false_reg->var_off);
- struct tnum false_64off = false_reg->var_off;
- struct tnum true_32off = tnum_subreg(true_reg->var_off);
- struct tnum true_64off = true_reg->var_off;
- s64 sval = (s64)val;
- s32 sval32 = (s32)val32;
-
- /* If the dst_reg is a pointer, we can't learn anything about its
- * variable offset from the compare (unless src_reg were a pointer into
- * the same object, but we don't bother with that.
- * Since false_reg and true_reg have the same type by construction, we
- * only need to check one of them for pointerness.
- */
- if (__is_pointer_value(false, false_reg))
- return;
+static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
+ u8 opcode, bool is_jmp32)
+{
+ if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32)
+ return is_pkt_ptr_branch_taken(reg1, reg2, opcode);
+ if (__is_pointer_value(false, reg1) || __is_pointer_value(false, reg2)) {
+ u64 val;
+
+ /* arrange that reg2 is a scalar, and reg1 is a pointer */
+ if (!is_reg_const(reg2, is_jmp32)) {
+ opcode = flip_opcode(opcode);
+ swap(reg1, reg2);
+ }
+ /* and ensure that reg2 is a constant */
+ if (!is_reg_const(reg2, is_jmp32))
+ return -1;
+
+ if (!reg_not_null(reg1))
+ return -1;
+
+ /* If pointer is valid tests against zero will fail so we can
+ * use this to direct branch taken.
+ */
+ val = reg_const_value(reg2, is_jmp32);
+ if (val != 0)
+ return -1;
+
+ switch (opcode) {
+ case BPF_JEQ:
+ return 0;
+ case BPF_JNE:
+ return 1;
+ default:
+ return -1;
+ }
+ }
+
+ /* now deal with two scalars, but not necessarily constants */
+ return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32);
+}
+
+/* Opcode that corresponds to a *false* branch condition.
+ * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2
+ */
+static u8 rev_opcode(u8 opcode)
+{
switch (opcode) {
- /* JEQ/JNE comparison doesn't change the register equivalence.
- *
- * r1 = r2;
- * if (r1 == 42) goto label;
- * ...
- * label: // here both r1 and r2 are known to be 42.
- *
- * Hence when marking register as known preserve it's ID.
+ case BPF_JEQ: return BPF_JNE;
+ case BPF_JNE: return BPF_JEQ;
+ /* JSET doesn't have its reverse opcode in BPF, so add
+ * BPF_X flag to denote the reverse of that operation
*/
+ case BPF_JSET: return BPF_JSET | BPF_X;
+ case BPF_JSET | BPF_X: return BPF_JSET;
+ case BPF_JGE: return BPF_JLT;
+ case BPF_JGT: return BPF_JLE;
+ case BPF_JLE: return BPF_JGT;
+ case BPF_JLT: return BPF_JGE;
+ case BPF_JSGE: return BPF_JSLT;
+ case BPF_JSGT: return BPF_JSLE;
+ case BPF_JSLE: return BPF_JSGT;
+ case BPF_JSLT: return BPF_JSGE;
+ default: return 0;
+ }
+}
+
+/* Refine range knowledge for <reg1> <op> <reg2> conditional operation. */
+static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2,
+ u8 opcode, bool is_jmp32)
+{
+ struct tnum t;
+ u64 val;
+
+again:
+ switch (opcode) {
case BPF_JEQ:
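+ /* reg1 == reg2 is known to hold here, so both registers can
+ * share the intersection of their bounds and var_off
+ */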
if (is_jmp32) {
- __mark_reg32_known(true_reg, val32);
- true_32off = tnum_subreg(true_reg->var_off);
+ reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
+ reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
+ reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
+ reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
+ reg2->u32_min_value = reg1->u32_min_value;
+ reg2->u32_max_value = reg1->u32_max_value;
+ reg2->s32_min_value = reg1->s32_min_value;
+ reg2->s32_max_value = reg1->s32_max_value;
+
+ t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off));
+ reg1->var_off = tnum_with_subreg(reg1->var_off, t);
+ reg2->var_off = tnum_with_subreg(reg2->var_off, t);
} else {
- ___mark_reg_known(true_reg, val);
- true_64off = true_reg->var_off;
+ reg1->umin_value = max(reg1->umin_value, reg2->umin_value);
+ reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
+ reg1->smin_value = max(reg1->smin_value, reg2->smin_value);
+ reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
+ reg2->umin_value = reg1->umin_value;
+ reg2->umax_value = reg1->umax_value;
+ reg2->smin_value = reg1->smin_value;
+ reg2->smax_value = reg1->smax_value;
+
+ reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off);
+ reg2->var_off = reg1->var_off;
}
break;
case BPF_JNE:
+ if (!is_reg_const(reg2, is_jmp32))
+ swap(reg1, reg2);
+ if (!is_reg_const(reg2, is_jmp32))
+ break;
+
+ /* try to recompute the bound of reg1 if reg2 is a const and
+ * is exactly the edge of reg1.
+ */
+ val = reg_const_value(reg2, is_jmp32);
if (is_jmp32) {
- __mark_reg32_known(false_reg, val32);
- false_32off = tnum_subreg(false_reg->var_off);
+ /* u32_min_value is not equal to 0xffffffff at this point,
+ * because otherwise u32_max_value is 0xffffffff as well,
+ * in such a case both reg1 and reg2 would be constants,
+ * jump would be predicted and reg_set_min_max() won't
+ * be called.
+ *
+ * Same reasoning works for all {u,s}{min,max}{32,64} cases
+ * below.
+ */
+ if (reg1->u32_min_value == (u32)val)
+ reg1->u32_min_value++;
+ if (reg1->u32_max_value == (u32)val)
+ reg1->u32_max_value--;
+ if (reg1->s32_min_value == (s32)val)
+ reg1->s32_min_value++;
+ if (reg1->s32_max_value == (s32)val)
+ reg1->s32_max_value--;
} else {
- ___mark_reg_known(false_reg, val);
- false_64off = false_reg->var_off;
+ if (reg1->umin_value == (u64)val)
+ reg1->umin_value++;
+ if (reg1->umax_value == (u64)val)
+ reg1->umax_value--;
+ if (reg1->smin_value == (s64)val)
+ reg1->smin_value++;
+ if (reg1->smax_value == (s64)val)
+ reg1->smax_value--;
}
break;
case BPF_JSET:
+ if (!is_reg_const(reg2, is_jmp32))
+ swap(reg1, reg2);
+ if (!is_reg_const(reg2, is_jmp32))
+ break;
+ val = reg_const_value(reg2, is_jmp32);
+ /* BPF_JSET (i.e., TRUE branch, *not* BPF_JSET | BPF_X)
+ * requires a single bit to learn something useful. E.g., if we
+ * know that `r1 & 0x3` is true, then which bits (0, 1, or both)
+ * are actually set? We can learn something definite only if
+ * it's a single-bit value to begin with.
+ *
+ * BPF_JSET | BPF_X (i.e., negation of BPF_JSET) doesn't have
+ * this restriction. I.e., !(r1 & 0x3) means neither bit 0 nor
+ * bit 1 is set, which we can readily use in adjustments.
+ */
+ if (!is_power_of_2(val))
+ break;
if (is_jmp32) {
- false_32off = tnum_and(false_32off, tnum_const(~val32));
- if (is_power_of_2(val32))
- true_32off = tnum_or(true_32off,
- tnum_const(val32));
+ t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val));
+ reg1->var_off = tnum_with_subreg(reg1->var_off, t);
} else {
- false_64off = tnum_and(false_64off, tnum_const(~val));
- if (is_power_of_2(val))
- true_64off = tnum_or(true_64off,
- tnum_const(val));
+ reg1->var_off = tnum_or(reg1->var_off, tnum_const(val));
}
break;
- case BPF_JGE:
- case BPF_JGT:
- {
+ case BPF_JSET | BPF_X: /* reverse of BPF_JSET, see rev_opcode() */
+ if (!is_reg_const(reg2, is_jmp32))
+ swap(reg1, reg2);
+ if (!is_reg_const(reg2, is_jmp32))
+ break;
+ val = reg_const_value(reg2, is_jmp32);
if (is_jmp32) {
- u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
- u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
-
- false_reg->u32_max_value = min(false_reg->u32_max_value,
- false_umax);
- true_reg->u32_min_value = max(true_reg->u32_min_value,
- true_umin);
+ t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val));
+ reg1->var_off = tnum_with_subreg(reg1->var_off, t);
} else {
- u64 false_umax = opcode == BPF_JGT ? val : val - 1;
- u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
-
- false_reg->umax_value = min(false_reg->umax_value, false_umax);
- true_reg->umin_value = max(true_reg->umin_value, true_umin);
+ reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val));
}
break;
- }
- case BPF_JSGE:
- case BPF_JSGT:
- {
+ case BPF_JLE:
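+ /* reg1 <= reg2 holds: cap reg1's upper bound by reg2's and
+ * raise reg2's lower bound to reg1's
+ */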
if (is_jmp32) {
- s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
- s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
-
- false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
- true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
+ reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value);
+ reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value);
} else {
- s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
- s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
-
- false_reg->smax_value = min(false_reg->smax_value, false_smax);
- true_reg->smin_value = max(true_reg->smin_value, true_smin);
+ reg1->umax_value = min(reg1->umax_value, reg2->umax_value);
+ reg2->umin_value = max(reg1->umin_value, reg2->umin_value);
}
break;
- }
- case BPF_JLE:
case BPF_JLT:
- {
if (is_jmp32) {
- u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
- u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
-
- false_reg->u32_min_value = max(false_reg->u32_min_value,
- false_umin);
- true_reg->u32_max_value = min(true_reg->u32_max_value,
- true_umax);
+ reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1);
+ reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value);
} else {
- u64 false_umin = opcode == BPF_JLT ? val : val + 1;
- u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
-
- false_reg->umin_value = max(false_reg->umin_value, false_umin);
- true_reg->umax_value = min(true_reg->umax_value, true_umax);
+ reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1);
+ reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value);
}
break;
- }
case BPF_JSLE:
+ if (is_jmp32) {
+ reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value);
+ reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value);
+ } else {
+ reg1->smax_value = min(reg1->smax_value, reg2->smax_value);
+ reg2->smin_value = max(reg1->smin_value, reg2->smin_value);
+ }
+ break;
case BPF_JSLT:
- {
if (is_jmp32) {
- s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
- s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
-
- false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
- true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
+ reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1);
+ reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value);
} else {
- s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
- s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
-
- false_reg->smin_value = max(false_reg->smin_value, false_smin);
- true_reg->smax_value = min(true_reg->smax_value, true_smax);
+ reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1);
+ reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
}
break;
- }
+ case BPF_JGE:
+ case BPF_JGT:
+ case BPF_JSGE:
+ case BPF_JSGT:
+ /* just reuse LE/LT logic above */
+ opcode = flip_opcode(opcode);
+ swap(reg1, reg2);
+ goto again;
default:
return;
}
-
- if (is_jmp32) {
- false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
- tnum_subreg(false_32off));
- true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
- tnum_subreg(true_32off));
- __reg_combine_32_into_64(false_reg);
- __reg_combine_32_into_64(true_reg);
- } else {
- false_reg->var_off = false_64off;
- true_reg->var_off = true_64off;
- __reg_combine_64_into_32(false_reg);
- __reg_combine_64_into_32(true_reg);
- }
}
-/* Same as above, but for the case that dst_reg holds a constant and src_reg is
- * the variable reg.
+/* Adjusts the register min/max values in the case that the dst_reg and
+ * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K
+ * check, in which case we have a fake SCALAR_VALUE representing insn->imm).
+ * Technically we can do similar adjustments for pointers to the same object,
+ * but we don't support that right now.
*/
-static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
- struct bpf_reg_state *false_reg,
- u64 val, u32 val32,
- u8 opcode, bool is_jmp32)
+static int reg_set_min_max(struct bpf_verifier_env *env,
+ struct bpf_reg_state *true_reg1,
+ struct bpf_reg_state *true_reg2,
+ struct bpf_reg_state *false_reg1,
+ struct bpf_reg_state *false_reg2,
+ u8 opcode, bool is_jmp32)
{
- opcode = flip_opcode(opcode);
- /* This uses zero as "not present in table"; luckily the zero opcode,
- * BPF_JA, can't get here.
+ int err;
+
+ /* If either register is a pointer, we can't learn anything about its
+ * variable offset from the compare (unless they were a pointer into
+ * the same object, but we don't bother with that).
*/
- if (opcode)
- reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
-}
-
-/* Regs are known to be equal, so intersect their min/max/var_off */
-static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
- struct bpf_reg_state *dst_reg)
-{
- src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
- dst_reg->umin_value);
- src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
- dst_reg->umax_value);
- src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
- dst_reg->smin_value);
- src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
- dst_reg->smax_value);
- src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
- dst_reg->var_off);
- reg_bounds_sync(src_reg);
- reg_bounds_sync(dst_reg);
-}
+ if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE)
+ return 0;
-static void reg_combine_min_max(struct bpf_reg_state *true_src,
- struct bpf_reg_state *true_dst,
- struct bpf_reg_state *false_src,
- struct bpf_reg_state *false_dst,
- u8 opcode)
-{
- switch (opcode) {
- case BPF_JEQ:
- __reg_combine_min_max(true_src, true_dst);
- break;
- case BPF_JNE:
- __reg_combine_min_max(false_src, false_dst);
- break;
- }
+ /* fallthrough (FALSE) branch */
+ regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32);
+ reg_bounds_sync(false_reg1);
+ reg_bounds_sync(false_reg2);
+
+ /* jump (TRUE) branch */
+ regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32);
+ reg_bounds_sync(true_reg1);
+ reg_bounds_sync(true_reg2);
+
+ err = reg_bounds_sanity_check(env, true_reg1, "true_reg1");
+ err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2");
+ err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1");
+ err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2");
+ return err;
}
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
@@ -14839,6 +14796,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
+ struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -14879,42 +14837,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
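+ /* for BPF_K, model the immediate as a known-constant scalar in
+ * an on-stack fake register
+ */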
+ src_reg = &fake_reg;
+ src_reg->type = SCALAR_VALUE;
+ __mark_reg_known(src_reg, insn->imm);
}
is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
-
- if (BPF_SRC(insn->code) == BPF_K) {
- pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
- } else if (src_reg->type == SCALAR_VALUE &&
- is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
- pred = is_branch_taken(dst_reg,
- tnum_subreg(src_reg->var_off).value,
- opcode,
- is_jmp32);
- } else if (src_reg->type == SCALAR_VALUE &&
- !is_jmp32 && tnum_is_const(src_reg->var_off)) {
- pred = is_branch_taken(dst_reg,
- src_reg->var_off.value,
- opcode,
- is_jmp32);
- } else if (dst_reg->type == SCALAR_VALUE &&
- is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
- pred = is_branch_taken(src_reg,
- tnum_subreg(dst_reg->var_off).value,
- flip_opcode(opcode),
- is_jmp32);
- } else if (dst_reg->type == SCALAR_VALUE &&
- !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
- pred = is_branch_taken(src_reg,
- dst_reg->var_off.value,
- flip_opcode(opcode),
- is_jmp32);
- } else if (reg_is_pkt_pointer_any(dst_reg) &&
- reg_is_pkt_pointer_any(src_reg) &&
- !is_jmp32) {
- pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
- }
-
+ pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
if (pred >= 0) {
/* If we get here with a dst_reg pointer type it is because
* above is_branch_taken() special cased the 0 comparison.
@@ -14962,53 +14891,27 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
return -EFAULT;
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
- /* detect if we are comparing against a constant value so we can adjust
- * our min/max values for our dst register.
- * this is only legit if both are scalars (or pointers to the same
- * object, I suppose, see the PTR_MAYBE_NULL related if block below),
- * because otherwise the different base pointers mean the offsets aren't
- * comparable.
- */
if (BPF_SRC(insn->code) == BPF_X) {
- struct bpf_reg_state *src_reg = &regs[insn->src_reg];
-
- if (dst_reg->type == SCALAR_VALUE &&
- src_reg->type == SCALAR_VALUE) {
- if (tnum_is_const(src_reg->var_off) ||
- (is_jmp32 &&
- tnum_is_const(tnum_subreg(src_reg->var_off))))
- reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg,
- src_reg->var_off.value,
- tnum_subreg(src_reg->var_off).value,
- opcode, is_jmp32);
- else if (tnum_is_const(dst_reg->var_off) ||
- (is_jmp32 &&
- tnum_is_const(tnum_subreg(dst_reg->var_off))))
- reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
- src_reg,
- dst_reg->var_off.value,
- tnum_subreg(dst_reg->var_off).value,
- opcode, is_jmp32);
- else if (!is_jmp32 &&
- (opcode == BPF_JEQ || opcode == BPF_JNE))
- /* Comparing for equality, we can combine knowledge */
- reg_combine_min_max(&other_branch_regs[insn->src_reg],
- &other_branch_regs[insn->dst_reg],
- src_reg, dst_reg, opcode);
- if (src_reg->id &&
- !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
- find_equal_scalars(this_branch, src_reg);
- find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
- }
-
- }
- } else if (dst_reg->type == SCALAR_VALUE) {
- reg_set_min_max(&other_branch_regs[insn->dst_reg],
- dst_reg, insn->imm, (u32)insn->imm,
- opcode, is_jmp32);
+ err = reg_set_min_max(env,
+ &other_branch_regs[insn->dst_reg],
+ &other_branch_regs[insn->src_reg],
+ dst_reg, src_reg, opcode, is_jmp32);
+ } else /* BPF_SRC(insn->code) == BPF_K */ {
+ err = reg_set_min_max(env,
+ &other_branch_regs[insn->dst_reg],
+ src_reg /* fake one */,
+ dst_reg, src_reg /* same fake one */,
+ opcode, is_jmp32);
}
+ if (err)
+ return err;
+ if (BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == SCALAR_VALUE && src_reg->id &&
+ !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
+ find_equal_scalars(this_branch, src_reg);
+ find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
+ }
if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
!WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
find_equal_scalars(this_branch, dst_reg);
@@ -15280,12 +15183,13 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
-static int check_return_code(struct bpf_verifier_env *env, int regno)
+static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name)
{
+ const char *exit_ctx = "At program exit";
struct tnum enforce_attach_type_range = tnum_unknown;
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
- struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
+ struct bpf_retval_range range = retval_range(0, 1);
enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -15327,17 +15231,9 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
if (frame->in_async_callback_fn) {
/* enforce return zero from async callbacks like timer */
- if (reg->type != SCALAR_VALUE) {
- verbose(env, "In async callback the register R%d is not a known value (%s)\n",
- regno, reg_type_str(env, reg->type));
- return -EINVAL;
- }
-
- if (!tnum_in(const_0, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
- return -EINVAL;
- }
- return 0;
+ exit_ctx = "At async callback return";
+ range = retval_range(0, 0);
+ goto enforce_retval;
}
if (is_subprog && !frame->in_exception_callback_fn) {
@@ -15360,14 +15256,14 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME ||
env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME)
- range = tnum_range(1, 1);
+ range = retval_range(1, 1);
if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
- range = tnum_range(0, 3);
+ range = retval_range(0, 3);
break;
case BPF_PROG_TYPE_CGROUP_SKB:
if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
- range = tnum_range(0, 3);
+ range = retval_range(0, 3);
enforce_attach_type_range = tnum_range(2, 3);
}
break;
@@ -15380,13 +15276,13 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
case BPF_PROG_TYPE_RAW_TRACEPOINT:
if (!env->prog->aux->attach_btf_id)
return 0;
- range = tnum_const(0);
+ range = retval_range(0, 0);
break;
case BPF_PROG_TYPE_TRACING:
switch (env->prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
- range = tnum_const(0);
+ range = retval_range(0, 0);
break;
case BPF_TRACE_RAW_TP:
case BPF_MODIFY_RETURN:
@@ -15398,7 +15294,7 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
}
break;
case BPF_PROG_TYPE_SK_LOOKUP:
- range = tnum_range(SK_DROP, SK_PASS);
+ range = retval_range(SK_DROP, SK_PASS);
break;
case BPF_PROG_TYPE_LSM:
@@ -15412,12 +15308,12 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
/* Make sure programs that attach to void
* hooks don't try to modify return value.
*/
- range = tnum_range(1, 1);
+ range = retval_range(1, 1);
}
break;
case BPF_PROG_TYPE_NETFILTER:
- range = tnum_range(NF_DROP, NF_ACCEPT);
+ range = retval_range(NF_DROP, NF_ACCEPT);
break;
case BPF_PROG_TYPE_EXT:
/* freplace program can return anything as its return value
@@ -15427,15 +15323,21 @@ static int check_return_code(struct bpf_verifier_env *env, int regno)
return 0;
}
+enforce_retval:
if (reg->type != SCALAR_VALUE) {
- verbose(env, "At program exit the register R%d is not a known value (%s)\n",
- regno, reg_type_str(env, reg->type));
+ verbose(env, "%s the register R%d is not a known value (%s)\n",
+ exit_ctx, regno, reg_type_str(env, reg->type));
return -EINVAL;
}
- if (!tnum_in(range, reg->var_off)) {
- verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
- if (prog->expected_attach_type == BPF_LSM_CGROUP &&
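+ /* the return value is range-checked below, so mark it precise */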
+ err = mark_chain_precision(env, regno);
+ if (err)
+ return err;
+
+ if (!retval_range_within(range, reg)) {
+ verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name);
+ if (!is_subprog &&
+ prog->expected_attach_type == BPF_LSM_CGROUP &&
prog_type == BPF_PROG_TYPE_LSM &&
!prog->aux->attach_func_proto->type)
verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
@@ -17200,7 +17102,8 @@ hit:
* the precision needs to be propagated back in
* the current state.
*/
- err = err ? : push_jmp_history(env, cur);
+ if (is_jmp_point(env, env->insn_idx))
+ err = err ? : push_jmp_history(env, cur, 0);
err = err ? : propagate_precision(env, &sl->state);
if (err)
return err;
@@ -17425,6 +17328,9 @@ static int do_check(struct bpf_verifier_env *env)
u8 class;
int err;
+ /* reset current history entry on each new instruction */
+ env->cur_hist_ent = NULL;
+
env->prev_insn_idx = prev_insn_idx;
if (env->insn_idx >= insn_cnt) {
verbose(env, "invalid insn idx %d insn_cnt %d\n",
@@ -17464,7 +17370,7 @@ static int do_check(struct bpf_verifier_env *env)
}
if (is_jmp_point(env, env->insn_idx)) {
- err = push_jmp_history(env, state);
+ err = push_jmp_history(env, state, 0);
if (err)
return err;
}
@@ -17541,10 +17447,8 @@ static int do_check(struct bpf_verifier_env *env)
insn->off, BPF_SIZE(insn->code),
BPF_READ, insn->dst_reg, false,
BPF_MODE(insn->code) == BPF_MEMSX);
- if (err)
- return err;
-
- err = save_aux_ptr_type(env, src_reg_type, true);
+ err = err ?: save_aux_ptr_type(env, src_reg_type, true);
+ err = err ?: reg_bounds_sanity_check(env, &regs[insn->dst_reg], "ldx");
if (err)
return err;
} else if (class == BPF_STX) {
@@ -17720,7 +17624,7 @@ process_bpf_exit_full:
continue;
}
- err = check_return_code(env, BPF_REG_0);
+ err = check_return_code(env, BPF_REG_0, "R0");
if (err)
return err;
process_bpf_exit:
@@ -18181,10 +18085,12 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
return -E2BIG;
}
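+ /* track the number of sleepable programs referencing this map */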
+ if (env->prog->aux->sleepable)
+ atomic64_inc(&map->sleepable_refcnt);
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
- * and all maps are released in free_used_maps()
+ * and all maps are released in bpf_free_used_maps()
*/
bpf_map_inc(map);
@@ -19401,9 +19307,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
env->exception_callback_subprog = env->subprog_cnt - 1;
/* Don't update insn_cnt, as add_hidden_subprog always appends insns */
- env->subprog_info[env->exception_callback_subprog].is_cb = true;
- env->subprog_info[env->exception_callback_subprog].is_async_cb = true;
- env->subprog_info[env->exception_callback_subprog].is_exception_cb = true;
+ mark_subprog_exc_cb(env, env->exception_callback_subprog);
}
for (i = 0; i < insn_cnt; i++, insn++) {
@@ -20103,9 +20007,10 @@ static void free_states(struct bpf_verifier_env *env)
}
}
-static int do_check_common(struct bpf_verifier_env *env, int subprog, bool is_ex_cb)
+static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
+ struct bpf_subprog_info *sub = subprog_info(env, subprog);
struct bpf_verifier_state *state;
struct bpf_reg_state *regs;
int ret, i;
@@ -20132,46 +20037,71 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog, bool is_ex
state->first_insn_idx = env->subprog_info[subprog].start;
state->last_insn_idx = -1;
+
regs = state->frame[state->curframe]->regs;
if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
- ret = btf_prepare_func_args(env, subprog, regs, is_ex_cb);
+ const char *sub_name = subprog_name(env, subprog);
+ struct bpf_subprog_arg_info *arg;
+ struct bpf_reg_state *reg;
+
+ verbose(env, "Validating %s() func#%d...\n", sub_name, subprog);
+ ret = btf_prepare_func_args(env, subprog);
if (ret)
goto out;
- for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
- if (regs[i].type == PTR_TO_CTX)
+
+ if (subprog_is_exc_cb(env, subprog)) {
+ state->frame[0]->in_exception_callback_fn = true;
+ /* We have already ensured that the callback returns an integer, just
+ * like all global subprogs. We need to determine it only has a single
+ * scalar argument.
+ */
+ if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) {
+ verbose(env, "exception cb only supports single integer argument\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ for (i = BPF_REG_1; i <= sub->arg_cnt; i++) {
+ arg = &sub->args[i - BPF_REG_1];
+ reg = &regs[i];
+
+ if (arg->arg_type == ARG_PTR_TO_CTX) {
+ reg->type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, i);
- else if (regs[i].type == SCALAR_VALUE)
+ } else if (arg->arg_type == ARG_ANYTHING) {
+ reg->type = SCALAR_VALUE;
mark_reg_unknown(env, regs, i);
- else if (base_type(regs[i].type) == PTR_TO_MEM) {
- const u32 mem_size = regs[i].mem_size;
-
+ } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) {
+ /* assume unspecial LOCAL dynptr type */
+ __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen);
+ } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) {
+ reg->type = PTR_TO_MEM;
+ if (arg->arg_type & PTR_MAYBE_NULL)
+ reg->type |= PTR_MAYBE_NULL;
mark_reg_known_zero(env, regs, i);
- regs[i].mem_size = mem_size;
- regs[i].id = ++env->id_gen;
+ reg->mem_size = arg->mem_size;
+ reg->id = ++env->id_gen;
+ } else {
+ WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n",
+ i - BPF_REG_1, arg->arg_type);
+ ret = -EFAULT;
+ goto out;
}
}
- if (is_ex_cb) {
- state->frame[0]->in_exception_callback_fn = true;
- env->subprog_info[subprog].is_cb = true;
- env->subprog_info[subprog].is_async_cb = true;
- env->subprog_info[subprog].is_exception_cb = true;
- }
} else {
+ /* if the main BPF program has associated BTF info, validate that
+ * it matches the expected signature, and otherwise mark the BTF
+ * info for the main program as unreliable
+ */
+ if (env->prog->aux->func_info_aux) {
+ ret = btf_prepare_func_args(env, 0);
+ if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX)
+ env->prog->aux->func_info_aux[0].unreliable = true;
+ }
+
/* 1st arg to a function */
regs[BPF_REG_1].type = PTR_TO_CTX;
mark_reg_known_zero(env, regs, BPF_REG_1);
- ret = btf_check_subprog_arg_match(env, subprog, regs);
- if (ret == -EFAULT)
- /* unlikely verifier bug. abort.
- * ret == 0 and ret < 0 are sadly acceptable for
- * main() function due to backward compatibility.
- * Like socket filter program may be written as:
- * int bpf_prog(struct pt_regs *ctx)
- * and never dereference that ctx in the program.
- * 'struct pt_regs' is a type mismatch for socket
- * filter that should be using 'struct __sk_buff'.
- */
- goto out;
}
ret = do_check(env);
@@ -20190,8 +20120,11 @@ out:
return ret;
}
-/* Verify all global functions in a BPF program one by one based on their BTF.
- * All global functions must pass verification. Otherwise the whole program is rejected.
+/* Lazily verify all global functions based on their BTF, if they are called
+ * from the main BPF program or any of its subprograms, transitively.
+ * BPF global subprogs called from dead code are not validated.
+ * All callable global functions must pass verification.
+ * Otherwise the whole program is rejected.
* Consider:
* int bar(int);
* int foo(int f)
@@ -20210,25 +20143,50 @@ out:
static int do_check_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog_aux *aux = env->prog->aux;
- int i, ret;
+ struct bpf_func_info_aux *sub_aux;
+ int i, ret, new_cnt;
if (!aux->func_info)
return 0;
+ /* exception callback is presumed to be always called */
+ if (env->exception_callback_subprog)
+ subprog_aux(env, env->exception_callback_subprog)->called = true;
+
+again:
+ new_cnt = 0;
for (i = 1; i < env->subprog_cnt; i++) {
- if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
+ if (!subprog_is_global(env, i))
+ continue;
+
+ sub_aux = subprog_aux(env, i);
+ if (!sub_aux->called || sub_aux->verified)
continue;
+
env->insn_idx = env->subprog_info[i].start;
WARN_ON_ONCE(env->insn_idx == 0);
- ret = do_check_common(env, i, env->exception_callback_subprog == i);
+ ret = do_check_common(env, i);
if (ret) {
return ret;
} else if (env->log.level & BPF_LOG_LEVEL) {
- verbose(env,
- "Func#%d is safe for any args that match its prototype\n",
- i);
+ verbose(env, "Func#%d ('%s') is safe for any args that match its prototype\n",
+ i, subprog_name(env, i));
}
+
+ /* We verified new global subprog, it might have called some
+ * more global subprogs that we haven't verified yet, so we
+ * need to do another pass over subprogs to verify those.
+ */
+ sub_aux->verified = true;
+ new_cnt++;
}
+
+ /* We can't loop forever as we verify at least one global subprog on
+ * each pass.
+ */
+ if (new_cnt)
+ goto again;
+
return 0;
}
@@ -20237,7 +20195,7 @@ static int do_check_main(struct bpf_verifier_env *env)
int ret;
env->insn_idx = 0;
- ret = do_check_common(env, 0, false);
+ ret = do_check_common(env, 0);
if (!ret)
env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
return ret;
@@ -20359,6 +20317,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
struct bpf_attach_target_info *tgt_info)
{
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
+ bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING;
const char prefix[] = "btf_trace_";
int ret = 0, subprog = -1, i;
const struct btf_type *t;
@@ -20429,10 +20388,21 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
bpf_log(log, "Can attach to only JITed progs\n");
return -EINVAL;
}
- if (tgt_prog->type == prog->type) {
- /* Cannot fentry/fexit another fentry/fexit program.
- * Cannot attach program extension to another extension.
- * It's ok to attach fentry/fexit to extension program.
+ if (prog_tracing) {
+ if (aux->attach_tracing_prog) {
+ /*
+ * Target program is an fentry/fexit which is already attached
+ * to another tracing program. More levels of nesting
+ * attachment are not allowed.
+ */
+ bpf_log(log, "Cannot nest tracing program attach more than once\n");
+ return -EINVAL;
+ }
+ } else if (tgt_prog->type == prog->type) {
+ /*
+ * To avoid potential call chain cycles, prevent attaching of a
+ * program extension to another extension. It's ok to attach
+ * fentry/fexit to extension program.
*/
bpf_log(log, "Cannot recursively attach\n");
return -EINVAL;
@@ -20445,16 +20415,15 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
* except fentry/fexit. The reason is the following.
* The fentry/fexit programs are used for performance
* analysis, stats and can be attached to any program
- * type except themselves. When extension program is
- * replacing XDP function it is necessary to allow
- * performance analysis of all functions. Both original
- * XDP program and its program extension. Hence
- * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
- * allowed. If extending of fentry/fexit was allowed it
- * would be possible to create long call chain
- * fentry->extension->fentry->extension beyond
- * reasonable stack size. Hence extending fentry is not
- * allowed.
+ * type. When extension program is replacing XDP function
+ * it is necessary to allow performance analysis of all
+ * functions. Both original XDP program and its program
+ * extension. Hence attaching fentry/fexit to
+ * BPF_PROG_TYPE_EXT is allowed. If extending of
+ * fentry/fexit was allowed it would be possible to create
+ * long call chain fentry->extension->fentry->extension
+ * beyond reasonable stack size. Hence extending fentry
+ * is not allowed.
*/
bpf_log(log, "Cannot extend fentry/fexit\n");
return -EINVAL;
@@ -20831,6 +20800,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
+ env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
env->explored_states = kvcalloc(state_htab_size(env),
sizeof(struct bpf_verifier_state_list *),
@@ -20873,8 +20843,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
if (ret < 0)
goto skip_full_check;
- ret = do_check_subprogs(env);
- ret = ret ?: do_check_main(env);
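+ /* verify the main program first; global subprogs are then
+ * verified lazily, only if they are actually called
+ */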
+ ret = do_check_main(env);
+ ret = ret ?: do_check_subprogs(env);
if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
ret = bpf_prog_offload_finalize(env);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 84e8a0f6e4e0..7ac6c52b25eb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -24,6 +24,7 @@
#include <linux/key.h>
#include <linux/verification.h>
#include <linux/namei.h>
+#include <linux/fileattr.h>
#include <net/bpf_sk_storage.h>
@@ -41,6 +42,9 @@
#define bpf_event_rcu_dereference(p) \
rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
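+/* cap the number of probes a single multi-link attach may request */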
+#define MAX_UPROBE_MULTI_CNT (1U << 20)
+#define MAX_KPROBE_MULTI_CNT (1U << 20)
+
#ifdef CONFIG_MODULES
struct bpf_trace_module {
struct module *module;
@@ -1376,6 +1380,8 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
struct bpf_dynptr_kern *sig_ptr,
struct bpf_key *trusted_keyring)
{
+ const void *data, *sig;
+ u32 data_len, sig_len;
int ret;
if (trusted_keyring->has_ref) {
@@ -1392,10 +1398,12 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
return ret;
}
- return verify_pkcs7_signature(data_ptr->data,
- __bpf_dynptr_size(data_ptr),
- sig_ptr->data,
- __bpf_dynptr_size(sig_ptr),
+ data_len = __bpf_dynptr_size(data_ptr);
+ data = __bpf_dynptr_data(data_ptr, data_len);
+ sig_len = __bpf_dynptr_size(sig_ptr);
+ sig = __bpf_dynptr_data(sig_ptr, sig_len);
+
+ return verify_pkcs7_signature(data, data_len, sig, sig_len,
trusted_keyring->key,
VERIFYING_UNSPECIFIED_SIGNATURE, NULL,
NULL);
@@ -1427,6 +1435,72 @@ static int __init bpf_key_sig_kfuncs_init(void)
late_initcall(bpf_key_sig_kfuncs_init);
#endif /* CONFIG_KEYS */
+/* filesystem kfuncs */
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_get_file_xattr - get xattr of a file
+ * @file: file to get xattr from
+ * @name__str: name of the xattr
+ * @value_ptr: output buffer of the xattr value
+ *
+ * Get xattr *name__str* of *file* and store the output in *value_ptr*.
+ *
+ * For security reasons, only *name__str* with prefix "user." is allowed.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
+ struct bpf_dynptr_kern *value_ptr)
+{
+ struct dentry *dentry;
+ u32 value_len;
+ void *value;
+ int ret;
+
+ if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ return -EPERM;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data_rw(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ dentry = file_dentry(file);
+ ret = inode_permission(&nop_mnt_idmap, dentry->d_inode, MAY_READ);
+ if (ret)
+ return ret;
+ return __vfs_getxattr(dentry, dentry->d_inode, name__str, value, value_len);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_SET8_START(fs_kfunc_set_ids)
+BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_SET8_END(fs_kfunc_set_ids)
+
+static int bpf_get_file_xattr_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ if (!btf_id_set8_contains(&fs_kfunc_set_ids, kfunc_id))
+ return 0;
+
+ /* Only allow to attach from LSM hooks, to avoid recursion */
+ return prog->type != BPF_PROG_TYPE_LSM ? -EACCES : 0;
+}
+
+static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &fs_kfunc_set_ids,
+ .filter = bpf_get_file_xattr_filter,
+};
+
+static int __init bpf_fs_kfuncs_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+}
+
+late_initcall(bpf_fs_kfuncs_init);
+
static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -2899,6 +2973,8 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
cnt = attr->link_create.kprobe_multi.cnt;
if (!cnt)
return -EINVAL;
+ if (cnt > MAX_KPROBE_MULTI_CNT)
+ return -E2BIG;
size = cnt * sizeof(*addrs);
addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
@@ -3029,6 +3105,7 @@ struct bpf_uprobe_multi_link;
struct bpf_uprobe {
struct bpf_uprobe_multi_link *link;
loff_t offset;
+ unsigned long ref_ctr_offset;
u64 cookie;
struct uprobe_consumer consumer;
};
@@ -3037,6 +3114,7 @@ struct bpf_uprobe_multi_link {
struct path path;
struct bpf_link link;
u32 cnt;
+ u32 flags;
struct bpf_uprobe *uprobes;
struct task_struct *task;
};
@@ -3078,9 +3156,79 @@ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
kfree(umulti_link);
}
+static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+{
+ u64 __user *uref_ctr_offsets = u64_to_user_ptr(info->uprobe_multi.ref_ctr_offsets);
+ u64 __user *ucookies = u64_to_user_ptr(info->uprobe_multi.cookies);
+ u64 __user *uoffsets = u64_to_user_ptr(info->uprobe_multi.offsets);
+ u64 __user *upath = u64_to_user_ptr(info->uprobe_multi.path);
+ u32 upath_size = info->uprobe_multi.path_size;
+ struct bpf_uprobe_multi_link *umulti_link;
+ u32 ucount = info->uprobe_multi.count;
+ int err = 0, i;
+ long left;
+
+ if (!upath ^ !upath_size)
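+ /* the path buffer and its size must be supplied together (or neither) */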
+ return -EINVAL;
+
+ if ((uoffsets || uref_ctr_offsets || ucookies) && !ucount)
+ return -EINVAL;
+
+ umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+ info->uprobe_multi.count = umulti_link->cnt;
+ info->uprobe_multi.flags = umulti_link->flags;
+ info->uprobe_multi.pid = umulti_link->task ?
+ task_pid_nr_ns(umulti_link->task, task_active_pid_ns(current)) : 0;
+
+ if (upath) {
+ char *p, *buf;
+
+ upath_size = min_t(u32, upath_size, PATH_MAX);
+
+ buf = kmalloc(upath_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ p = d_path(&umulti_link->path, buf, upath_size);
+ if (IS_ERR(p)) {
+ kfree(buf);
+ return PTR_ERR(p);
+ }
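+ /* d_path() fills in the path at the end of the buffer */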
+ upath_size = buf + upath_size - p;
+ left = copy_to_user(upath, p, upath_size);
+ kfree(buf);
+ if (left)
+ return -EFAULT;
+ info->uprobe_multi.path_size = upath_size;
+ }
+
+ if (!uoffsets && !ucookies && !uref_ctr_offsets)
+ return 0;
+
+ if (ucount < umulti_link->cnt)
+ err = -ENOSPC;
+ else
+ ucount = umulti_link->cnt;
+
+ for (i = 0; i < ucount; i++) {
+ if (uoffsets &&
+ put_user(umulti_link->uprobes[i].offset, uoffsets + i))
+ return -EFAULT;
+ if (uref_ctr_offsets &&
+ put_user(umulti_link->uprobes[i].ref_ctr_offset, uref_ctr_offsets + i))
+ return -EFAULT;
+ if (ucookies &&
+ put_user(umulti_link->uprobes[i].cookie, ucookies + i))
+ return -EFAULT;
+ }
+
+ return err;
+}
+
static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
.release = bpf_uprobe_multi_link_release,
.dealloc = bpf_uprobe_multi_link_dealloc,
+ .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
};
static int uprobe_prog_run(struct bpf_uprobe *uprobe,
@@ -3168,7 +3316,6 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
{
struct bpf_uprobe_multi_link *link = NULL;
unsigned long __user *uref_ctr_offsets;
- unsigned long *ref_ctr_offsets = NULL;
struct bpf_link_primer link_primer;
struct bpf_uprobe *uprobes = NULL;
struct task_struct *task = NULL;
@@ -3202,6 +3349,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!upath || !uoffsets || !cnt)
return -EINVAL;
+ if (cnt > MAX_UPROBE_MULTI_CNT)
+ return -E2BIG;
uref_ctr_offsets = u64_to_user_ptr(attr->link_create.uprobe_multi.ref_ctr_offsets);
ucookies = u64_to_user_ptr(attr->link_create.uprobe_multi.cookies);
@@ -3241,22 +3390,20 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!uprobes || !link)
goto error_free;
- if (uref_ctr_offsets) {
- ref_ctr_offsets = kvcalloc(cnt, sizeof(*ref_ctr_offsets), GFP_KERNEL);
- if (!ref_ctr_offsets)
- goto error_free;
- }
-
for (i = 0; i < cnt; i++) {
- if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
+ if (__get_user(uprobes[i].offset, uoffsets + i)) {
err = -EFAULT;
goto error_free;
}
- if (uref_ctr_offsets && __get_user(ref_ctr_offsets[i], uref_ctr_offsets + i)) {
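+ /* offset is loff_t (signed), reject negative values from user space */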
+ if (uprobes[i].offset < 0) {
+ err = -EINVAL;
+ goto error_free;
+ }
+ if (uref_ctr_offsets && __get_user(uprobes[i].ref_ctr_offset, uref_ctr_offsets + i)) {
err = -EFAULT;
goto error_free;
}
- if (__get_user(uprobes[i].offset, uoffsets + i)) {
+ if (ucookies && __get_user(uprobes[i].cookie, ucookies + i)) {
err = -EFAULT;
goto error_free;
}
@@ -3276,6 +3423,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
link->uprobes = uprobes;
link->path = path;
link->task = task;
+ link->flags = flags;
bpf_link_init(&link->link, BPF_LINK_TYPE_UPROBE_MULTI,
&bpf_uprobe_multi_link_lops, prog);
@@ -3283,7 +3431,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
for (i = 0; i < cnt; i++) {
err = uprobe_register_refctr(d_real_inode(link->path.dentry),
uprobes[i].offset,
- ref_ctr_offsets ? ref_ctr_offsets[i] : 0,
+ uprobes[i].ref_ctr_offset,
&uprobes[i].consumer);
if (err) {
bpf_uprobe_unregister(&path, uprobes, i);
@@ -3295,11 +3443,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (err)
goto error_free;
- kvfree(ref_ctr_offsets);
return bpf_link_settle(&link_primer);
error_free:
- kvfree(ref_ctr_offsets);
kvfree(uprobes);
kfree(link);
if (task)