Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/arraymap.c         |  27
-rw-r--r--  kernel/bpf/bpf_struct_ops.c   |   3
-rw-r--r--  kernel/bpf/btf.c              |  64
-rw-r--r--  kernel/bpf/cpumap.c           |   3
-rw-r--r--  kernel/bpf/devmap.c           |   6
-rw-r--r--  kernel/bpf/hashtab.c          |  23
-rw-r--r--  kernel/bpf/local_storage.c    |   3
-rw-r--r--  kernel/bpf/lpm_trie.c         |   8
-rw-r--r--  kernel/bpf/queue_stack_maps.c |  13
-rw-r--r--  kernel/bpf/reuseport_array.c  |   5
-rw-r--r--  kernel/bpf/ringbuf.c          |  10
-rw-r--r--  kernel/bpf/stackmap.c         |  87
-rw-r--r--  kernel/bpf/verifier.c         | 134
-rw-r--r--  kernel/events/callchain.c     |  13
-rw-r--r--  kernel/trace/bpf_trace.c      |  27
15 files changed, 338 insertions(+), 88 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 11584618e861..c66e8273fccd 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -386,13 +386,6 @@ static void array_map_free(struct bpf_map *map)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
- * so the programs (can be more than one that used this map) were
- * disconnected from events. Wait for outstanding programs to complete
- * and free the array
- */
- synchronize_rcu();
-
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
@@ -494,6 +487,7 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
vma->vm_pgoff + pgoff);
}
+static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
.map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -510,8 +504,11 @@ const struct bpf_map_ops array_map_ops = {
.map_check_btf = array_map_check_btf,
.map_lookup_batch = generic_map_lookup_batch,
.map_update_batch = generic_map_update_batch,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &array_map_btf_id,
};
+static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
.map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -522,6 +519,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_delete_elem = array_map_delete_elem,
.map_seq_show_elem = percpu_array_map_seq_show_elem,
.map_check_btf = array_map_check_btf,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &percpu_array_map_btf_id,
};
static int fd_array_map_alloc_check(union bpf_attr *attr)
@@ -540,8 +539,6 @@ static void fd_array_map_free(struct bpf_map *map)
struct bpf_array *array = container_of(map, struct bpf_array, map);
int i;
- synchronize_rcu();
-
/* make sure it's empty */
for (i = 0; i < array->map.max_entries; i++)
BUG_ON(array->ptrs[i] != NULL);
@@ -868,6 +865,7 @@ static void prog_array_map_free(struct bpf_map *map)
fd_array_map_free(map);
}
+static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = prog_array_map_alloc,
@@ -883,6 +881,8 @@ const struct bpf_map_ops prog_array_map_ops = {
.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
.map_release_uref = prog_array_map_clear,
.map_seq_show_elem = prog_array_map_seq_show_elem,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &prog_array_map_btf_id,
};
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -961,6 +961,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
rcu_read_unlock();
}
+static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -972,6 +973,8 @@ const struct bpf_map_ops perf_event_array_map_ops = {
.map_fd_put_ptr = perf_event_fd_array_put_ptr,
.map_release = perf_event_fd_array_release,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &perf_event_array_map_btf_id,
};
#ifdef CONFIG_CGROUPS
@@ -994,6 +997,7 @@ static void cgroup_fd_array_free(struct bpf_map *map)
fd_array_map_free(map);
}
+static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_map_alloc,
@@ -1004,6 +1008,8 @@ const struct bpf_map_ops cgroup_array_map_ops = {
.map_fd_get_ptr = cgroup_fd_array_get_ptr,
.map_fd_put_ptr = cgroup_fd_array_put_ptr,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &cgroup_array_map_btf_id,
};
#endif
@@ -1077,6 +1083,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
return insn - insn_buf;
}
+static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_of_map_alloc,
@@ -1089,4 +1096,6 @@ const struct bpf_map_ops array_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = array_of_map_gen_lookup,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_array",
+ .map_btf_id = &array_of_maps_map_btf_id,
};
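Every map type converted above follows the same two-line idiom: a static id slot placed next to the ops table, plus the name of the struct that backs the map in vmlinux BTF. A minimal sketch of the idiom (my_map_ops, my_map_btf_id and "my_map" are hypothetical names, not part of this series):

	/* slot stays 0 until vmlinux BTF is parsed at boot */
	static int my_map_btf_id;

	const struct bpf_map_ops my_map_ops = {
		/* ... the usual map callbacks ... */
		.map_btf_name = "my_map",       /* struct name to resolve in vmlinux BTF */
		.map_btf_id   = &my_map_btf_id, /* filled by btf_vmlinux_map_ids_init() */
	};

A zero id is later treated by the verifier as "this map type does not support direct map_ptr access" (see the -ENOTSUPP check in verifier.c below).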
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index c6b0decaa46a..969c5d47f81f 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -611,6 +611,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
return map;
}
+static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_alloc_check = bpf_struct_ops_map_alloc_check,
.map_alloc = bpf_struct_ops_map_alloc,
@@ -620,6 +621,8 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
.map_delete_elem = bpf_struct_ops_map_delete_elem,
.map_update_elem = bpf_struct_ops_map_update_elem,
.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
+ .map_btf_name = "bpf_struct_ops_map",
+ .map_btf_id = &bpf_struct_ops_map_btf_id,
};
/* "const void *" because some subsystem is
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 58c9af1d4808..4c3007f428b1 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3571,6 +3571,41 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
return ctx_type;
}
+static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
+#define BPF_LINK_TYPE(_id, _name)
+#define BPF_MAP_TYPE(_id, _ops) \
+ [_id] = &_ops,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_LINK_TYPE
+#undef BPF_MAP_TYPE
+};
+
+static int btf_vmlinux_map_ids_init(const struct btf *btf,
+ struct bpf_verifier_log *log)
+{
+ const struct bpf_map_ops *ops;
+ int i, btf_id;
+
+ for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
+ ops = btf_vmlinux_map_ops[i];
+ if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
+ continue;
+ if (!ops->map_btf_name || !ops->map_btf_id) {
+ bpf_log(log, "map type %d is misconfigured\n", i);
+ return -EINVAL;
+ }
+ btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
+ BTF_KIND_STRUCT);
+ if (btf_id < 0)
+ return btf_id;
+ *ops->map_btf_id = btf_id;
+ }
+
+ return 0;
+}
+
static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
struct btf *btf,
const struct btf_type *t,
@@ -3591,7 +3626,7 @@ struct btf *btf_parse_vmlinux(void)
struct btf_verifier_env *env = NULL;
struct bpf_verifier_log *log;
struct btf *btf = NULL;
- int err, i;
+ int err, btf_id;
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
if (!env)
@@ -3625,26 +3660,21 @@ struct btf *btf_parse_vmlinux(void)
goto errout;
/* find struct bpf_ctx_convert for type checking later */
- for (i = 1; i <= btf->nr_types; i++) {
- const struct btf_type *t;
- const char *tname;
-
- t = btf_type_by_id(btf, i);
- if (!__btf_type_is_struct(t))
- continue;
- tname = __btf_name_by_offset(btf, t->name_off);
- if (!strcmp(tname, "bpf_ctx_convert")) {
- /* btf_parse_vmlinux() runs under bpf_verifier_lock */
- bpf_ctx_convert.t = t;
- break;
- }
- }
- if (i > btf->nr_types) {
- err = -ENOENT;
+ btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT);
+ if (btf_id < 0) {
+ err = btf_id;
goto errout;
}
+ /* btf_parse_vmlinux() runs under bpf_verifier_lock */
+ bpf_ctx_convert.t = btf_type_by_id(btf, btf_id);
+
+ /* find bpf map structs for map_ptr access checking */
+ err = btf_vmlinux_map_ids_init(btf, log);
+ if (err < 0)
+ goto errout;
bpf_struct_ops_init(btf, log);
+ init_btf_sock_ids(btf);
btf_verifier_env_free(env);
refcount_set(&btf->refcnt, 1);
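btf_vmlinux_map_ops[] is built with the usual bpf_types.h x-macro trick: BPF_PROG_TYPE() and BPF_LINK_TYPE() are defined away and BPF_MAP_TYPE() becomes a designated initializer, so including the header emits one ops pointer per registered map type. A sketch of what the include expands to (the two entries shown are examples; the full list lives in bpf_types.h):

	/* bpf_types.h contains lines such as
	 *   BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
	 *   BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
	 * which expand here to:
	 */
	static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
		[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
		[BPF_MAP_TYPE_HASH]  = &htab_map_ops,
		/* ... one slot per BPF_MAP_TYPE() line ... */
	};

Holes in the array (map types that are compiled out) stay NULL, and btf_vmlinux_map_ids_init() skips them along with any ops that set neither name nor id.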
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 27595fc6da56..bd8658055c16 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -543,6 +543,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
+static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
.map_alloc = cpu_map_alloc,
.map_free = cpu_map_free,
@@ -551,6 +552,8 @@ const struct bpf_map_ops cpu_map_ops = {
.map_lookup_elem = cpu_map_lookup_elem,
.map_get_next_key = cpu_map_get_next_key,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_cpu_map",
+ .map_btf_id = &cpu_map_btf_id,
};
static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 5fdbc776a760..10abb06065bb 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -749,6 +749,7 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
map, key, value, map_flags);
}
+static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
@@ -757,8 +758,11 @@ const struct bpf_map_ops dev_map_ops = {
.map_update_elem = dev_map_update_elem,
.map_delete_elem = dev_map_delete_elem,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_dtab",
+ .map_btf_id = &dev_map_btf_id,
};
+static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
@@ -767,6 +771,8 @@ const struct bpf_map_ops dev_map_hash_ops = {
.map_update_elem = dev_map_hash_update_elem,
.map_delete_elem = dev_map_hash_delete_elem,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_dtab",
+ .map_btf_id = &dev_map_hash_map_btf_id,
};
static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b4b288a3c3c9..d4378d7d442b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1290,12 +1290,10 @@ static void htab_map_free(struct bpf_map *map)
{
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
- /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
- * so the programs (can be more than one that used this map) were
- * disconnected from events. Wait for outstanding critical sections in
- * these programs to complete
+ /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
+ * bpf_free_used_maps() is called after bpf prog is no longer executing.
+ * There is no need to synchronize_rcu() here to protect map elements.
*/
- synchronize_rcu();
/* some of free_htab_elem() callbacks for elements of this map may
* not have executed. Wait for them.
@@ -1614,6 +1612,7 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
true, false);
}
+static int htab_map_btf_id;
const struct bpf_map_ops htab_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1625,8 +1624,11 @@ const struct bpf_map_ops htab_map_ops = {
.map_gen_lookup = htab_map_gen_lookup,
.map_seq_show_elem = htab_map_seq_show_elem,
BATCH_OPS(htab),
+ .map_btf_name = "bpf_htab",
+ .map_btf_id = &htab_map_btf_id,
};
+static int htab_lru_map_btf_id;
const struct bpf_map_ops htab_lru_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1639,6 +1641,8 @@ const struct bpf_map_ops htab_lru_map_ops = {
.map_gen_lookup = htab_lru_map_gen_lookup,
.map_seq_show_elem = htab_map_seq_show_elem,
BATCH_OPS(htab_lru),
+ .map_btf_name = "bpf_htab",
+ .map_btf_id = &htab_lru_map_btf_id,
};
/* Called from eBPF program */
@@ -1743,6 +1747,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
rcu_read_unlock();
}
+static int htab_percpu_map_btf_id;
const struct bpf_map_ops htab_percpu_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1753,8 +1758,11 @@ const struct bpf_map_ops htab_percpu_map_ops = {
.map_delete_elem = htab_map_delete_elem,
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
BATCH_OPS(htab_percpu),
+ .map_btf_name = "bpf_htab",
+ .map_btf_id = &htab_percpu_map_btf_id,
};
+static int htab_lru_percpu_map_btf_id;
const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
@@ -1765,6 +1773,8 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_delete_elem = htab_lru_map_delete_elem,
.map_seq_show_elem = htab_percpu_map_seq_show_elem,
BATCH_OPS(htab_lru_percpu),
+ .map_btf_name = "bpf_htab",
+ .map_btf_id = &htab_lru_percpu_map_btf_id,
};
static int fd_htab_map_alloc_check(union bpf_attr *attr)
@@ -1887,6 +1897,7 @@ static void htab_of_map_free(struct bpf_map *map)
fd_htab_map_free(map);
}
+static int htab_of_maps_map_btf_id;
const struct bpf_map_ops htab_of_maps_map_ops = {
.map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc,
@@ -1899,4 +1910,6 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
.map_gen_lookup = htab_of_map_gen_lookup,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_htab",
+ .map_btf_id = &htab_of_maps_map_btf_id,
};
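The same now-redundant synchronize_rcu() is dropped from arraymap, lpm_trie, queue_stack_maps, reuseport_array, ringbuf and stackmap elsewhere in this diff; the new comment in htab_map_free() above carries the shared rationale. The release ordering it relies on, condensed into a sketch (real function names, simplified flow):

	/* prog release already includes an RCU grace period: */
	bpf_prog_put()
	  -> call_rcu()                       /* grace period here */
	    -> bpf_prog_free_deferred()
	      -> bpf_free_used_maps()
	        -> bpf_map_put() -> ... -> ops->map_free()

By the time ->map_free() runs, no program that used the map can still be executing, so a per-map synchronize_rcu() only added teardown latency.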
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 33d01866bcc2..51bd5a8cb01b 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -409,6 +409,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
rcu_read_unlock();
}
+static int cgroup_storage_map_btf_id;
const struct bpf_map_ops cgroup_storage_map_ops = {
.map_alloc = cgroup_storage_map_alloc,
.map_free = cgroup_storage_map_free,
@@ -418,6 +419,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_delete_elem = cgroup_storage_delete_elem,
.map_check_btf = cgroup_storage_check_btf,
.map_seq_show_elem = cgroup_storage_seq_show_elem,
+ .map_btf_name = "bpf_cgroup_storage_map",
+ .map_btf_id = &cgroup_storage_map_btf_id,
};
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index c8cc4e4cf98d..44474bf3ab7a 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -589,11 +589,6 @@ static void trie_free(struct bpf_map *map)
struct lpm_trie_node __rcu **slot;
struct lpm_trie_node *node;
- /* Wait for outstanding programs to complete
- * update/lookup/delete/get_next_key and free the trie.
- */
- synchronize_rcu();
-
/* Always start at the root and walk down to a node that has no
* children. Then free that node, nullify its reference in the parent
* and start over.
@@ -735,6 +730,7 @@ static int trie_check_btf(const struct bpf_map *map,
-EINVAL : 0;
}
+static int trie_map_btf_id;
const struct bpf_map_ops trie_map_ops = {
.map_alloc = trie_alloc,
.map_free = trie_free,
@@ -743,4 +739,6 @@ const struct bpf_map_ops trie_map_ops = {
.map_update_elem = trie_update_elem,
.map_delete_elem = trie_delete_elem,
.map_check_btf = trie_check_btf,
+ .map_btf_name = "lpm_trie",
+ .map_btf_id = &trie_map_btf_id,
};
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 05c8e043b9d2..44184f82916a 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -101,13 +101,6 @@ static void queue_stack_map_free(struct bpf_map *map)
{
struct bpf_queue_stack *qs = bpf_queue_stack(map);
- /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
- * so the programs (can be more than one that used this map) were
- * disconnected from events. Wait for outstanding critical sections in
- * these programs to complete
- */
- synchronize_rcu();
-
bpf_map_area_free(qs);
}
@@ -262,6 +255,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
return -EINVAL;
}
+static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
.map_alloc_check = queue_stack_map_alloc_check,
.map_alloc = queue_stack_map_alloc,
@@ -273,8 +267,11 @@ const struct bpf_map_ops queue_map_ops = {
.map_pop_elem = queue_map_pop_elem,
.map_peek_elem = queue_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
+ .map_btf_name = "bpf_queue_stack",
+ .map_btf_id = &queue_map_btf_id,
};
+static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
.map_alloc_check = queue_stack_map_alloc_check,
.map_alloc = queue_stack_map_alloc,
@@ -286,4 +283,6 @@ const struct bpf_map_ops stack_map_ops = {
.map_pop_elem = stack_map_pop_elem,
.map_peek_elem = stack_map_peek_elem,
.map_get_next_key = queue_stack_map_get_next_key,
+ .map_btf_name = "bpf_queue_stack",
+ .map_btf_id = &stack_map_btf_id,
};
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 21cde24386db..3625c4fcc65c 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -96,8 +96,6 @@ static void reuseport_array_free(struct bpf_map *map)
struct sock *sk;
u32 i;
- synchronize_rcu();
-
/*
* ops->map_*_elem() will not be able to access this
* array now. Hence, this function only races with
@@ -345,6 +343,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
return 0;
}
+static int reuseport_array_map_btf_id;
const struct bpf_map_ops reuseport_array_ops = {
.map_alloc_check = reuseport_array_alloc_check,
.map_alloc = reuseport_array_alloc,
@@ -352,4 +351,6 @@ const struct bpf_map_ops reuseport_array_ops = {
.map_lookup_elem = reuseport_array_lookup_elem,
.map_get_next_key = reuseport_array_get_next_key,
.map_delete_elem = reuseport_array_delete_elem,
+ .map_btf_name = "reuseport_array",
+ .map_btf_id = &reuseport_array_map_btf_id,
};
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 180414bb0d3e..13a8d3967e07 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -215,13 +215,6 @@ static void ringbuf_map_free(struct bpf_map *map)
{
struct bpf_ringbuf_map *rb_map;
- /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
- * so the programs (can be more than one that used this map) were
- * disconnected from events. Wait for outstanding critical sections in
- * these programs to complete
- */
- synchronize_rcu();
-
rb_map = container_of(map, struct bpf_ringbuf_map, map);
bpf_ringbuf_free(rb_map->rb);
kfree(rb_map);
@@ -294,6 +287,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
return 0;
}
+static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
.map_alloc = ringbuf_map_alloc,
.map_free = ringbuf_map_free,
@@ -303,6 +297,8 @@ const struct bpf_map_ops ringbuf_map_ops = {
.map_update_elem = ringbuf_map_update_elem,
.map_delete_elem = ringbuf_map_delete_elem,
.map_get_next_key = ringbuf_map_get_next_key,
+ .map_btf_name = "bpf_ringbuf_map",
+ .map_btf_id = &ringbuf_map_btf_id,
};
/* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 599488f25e40..a6c361ed7937 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -348,6 +348,44 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
}
+static struct perf_callchain_entry *
+get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
+{
+#ifdef CONFIG_STACKTRACE
+ struct perf_callchain_entry *entry;
+ int rctx;
+
+ entry = get_callchain_entry(&rctx);
+
+ if (!entry)
+ return NULL;
+
+ entry->nr = init_nr +
+ stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
+ sysctl_perf_event_max_stack - init_nr, 0);
+
+ /* stack_trace_save_tsk() works on unsigned long array, while
+ * perf_callchain_entry uses u64 array. For 32-bit systems, it is
+ * necessary to fix this mismatch.
+ */
+ if (__BITS_PER_LONG != 64) {
+ unsigned long *from = (unsigned long *) entry->ip;
+ u64 *to = entry->ip;
+ int i;
+
+ /* copy data from the end to avoid using extra buffer */
+ for (i = entry->nr - 1; i >= (int)init_nr; i--)
+ to[i] = (u64)(from[i]);
+ }
+
+ put_callchain_entry(rctx);
+
+ return entry;
+#else /* CONFIG_STACKTRACE */
+ return NULL;
+#endif
+}
+
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
u64, flags)
{
@@ -448,8 +486,8 @@ const struct bpf_func_proto bpf_get_stackid_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
- u64, flags)
+static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+ void *buf, u32 size, u64 flags)
{
u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
bool user_build_id = flags & BPF_F_USER_BUILD_ID;
@@ -471,13 +509,22 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
if (unlikely(size % elem_size))
goto clear;
+ /* cannot get valid user stack for task without user_mode regs */
+ if (task && user && !user_mode(regs))
+ goto err_fault;
+
num_elem = size / elem_size;
if (sysctl_perf_event_max_stack < num_elem)
init_nr = 0;
else
init_nr = sysctl_perf_event_max_stack - num_elem;
- trace = get_perf_callchain(regs, init_nr, kernel, user,
- sysctl_perf_event_max_stack, false, false);
+
+ if (kernel && task)
+ trace = get_callchain_entry_for_task(task, init_nr);
+ else
+ trace = get_perf_callchain(regs, init_nr, kernel, user,
+ sysctl_perf_event_max_stack,
+ false, false);
if (unlikely(!trace))
goto err_fault;
@@ -505,6 +552,12 @@ clear:
return err;
}
+BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
+ u64, flags)
+{
+ return __bpf_get_stack(regs, NULL, buf, size, flags);
+}
+
const struct bpf_func_proto bpf_get_stack_proto = {
.func = bpf_get_stack,
.gpl_only = true,
@@ -515,6 +568,26 @@ const struct bpf_func_proto bpf_get_stack_proto = {
.arg4_type = ARG_ANYTHING,
};
+BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+ u32, size, u64, flags)
+{
+ struct pt_regs *regs = task_pt_regs(task);
+
+ return __bpf_get_stack(regs, task, buf, size, flags);
+}
+
+static int bpf_get_task_stack_btf_ids[5];
+const struct bpf_func_proto bpf_get_task_stack_proto = {
+ .func = bpf_get_task_stack,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg2_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
+ .btf_id = bpf_get_task_stack_btf_ids,
+};
+
/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
@@ -604,15 +677,13 @@ static void stack_map_free(struct bpf_map *map)
{
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
- /* wait for bpf programs to complete before freeing stack map */
- synchronize_rcu();
-
bpf_map_area_free(smap->elems);
pcpu_freelist_destroy(&smap->freelist);
bpf_map_area_free(smap);
put_callchain_buffers();
}
+static int stack_trace_map_btf_id;
const struct bpf_map_ops stack_trace_map_ops = {
.map_alloc = stack_map_alloc,
.map_free = stack_map_free,
@@ -621,6 +692,8 @@ const struct bpf_map_ops stack_trace_map_ops = {
.map_update_elem = stack_map_update_elem,
.map_delete_elem = stack_map_delete_elem,
.map_check_btf = map_check_no_btf,
+ .map_btf_name = "bpf_stack_map",
+ .map_btf_id = &stack_trace_map_btf_id,
};
static int __init stack_map_init(void)
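bpf_get_task_stack() is the task-aware sibling of bpf_get_stack(): it shares the __bpf_get_stack() core, but captures the kernel stack of an arbitrary task through get_callchain_entry_for_task() rather than the current pt_regs. A usage sketch from a task iterator program, following common libbpf conventions (the section name, map layout and vmlinux.h-style context struct are assumptions, not part of this diff):

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	#define MAX_DEPTH 64

	/* scratch buffer so the dump doesn't live on the small BPF stack */
	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u64[MAX_DEPTH]);
	} scratch SEC(".maps");

	SEC("iter/task")
	int dump_task_stack(struct bpf_iter__task *ctx)
	{
		struct task_struct *task = ctx->task;
		__u32 zero = 0;
		__u64 *buf;

		if (!task)
			return 0;
		buf = bpf_map_lookup_elem(&scratch, &zero);
		if (!buf)
			return 0;
		/* returns bytes copied or a negative error; flags 0 means
		 * the *kernel* stack of *task*, not of current */
		bpf_get_task_stack(task, buf, MAX_DEPTH * sizeof(__u64), 0);
		return 0;
	}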
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 34cde841ab68..b608185e1ffd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1351,6 +1351,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
__mark_reg_not_init(env, regs + regno);
}
+static void mark_btf_ld_reg(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs, u32 regno,
+ enum bpf_reg_type reg_type, u32 btf_id)
+{
+ if (reg_type == SCALAR_VALUE) {
+ mark_reg_unknown(env, regs, regno);
+ return;
+ }
+ mark_reg_known_zero(env, regs, regno);
+ regs[regno].type = PTR_TO_BTF_ID;
+ regs[regno].btf_id = btf_id;
+}
+
#define DEF_NOT_SUBREG (0)
static void init_reg_state(struct bpf_verifier_env *env,
struct bpf_func_state *state)
@@ -3182,19 +3195,68 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
if (ret < 0)
return ret;
- if (atype == BPF_READ && value_regno >= 0) {
- if (ret == SCALAR_VALUE) {
- mark_reg_unknown(env, regs, value_regno);
- return 0;
- }
- mark_reg_known_zero(env, regs, value_regno);
- regs[value_regno].type = PTR_TO_BTF_ID;
- regs[value_regno].btf_id = btf_id;
+ if (atype == BPF_READ && value_regno >= 0)
+ mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
+ return 0;
+}
+
+static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+ struct bpf_reg_state *regs,
+ int regno, int off, int size,
+ enum bpf_access_type atype,
+ int value_regno)
+{
+ struct bpf_reg_state *reg = regs + regno;
+ struct bpf_map *map = reg->map_ptr;
+ const struct btf_type *t;
+ const char *tname;
+ u32 btf_id;
+ int ret;
+
+ if (!btf_vmlinux) {
+ verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
+ return -ENOTSUPP;
}
+ if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
+ verbose(env, "map_ptr access not supported for map type %d\n",
+ map->map_type);
+ return -ENOTSUPP;
+ }
+
+ t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+
+ if (!env->allow_ptr_to_map_access) {
+ verbose(env,
+ "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
+ tname);
+ return -EPERM;
+ }
+
+ if (off < 0) {
+ verbose(env, "R%d is %s invalid negative access: off=%d\n",
+ regno, tname, off);
+ return -EACCES;
+ }
+
+ if (atype != BPF_READ) {
+ verbose(env, "only read from %s is supported\n", tname);
+ return -EACCES;
+ }
+
+ ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+ if (ret < 0)
+ return ret;
+
+ if (value_regno >= 0)
+ mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
return 0;
}
+
/* check whether memory at (regno + off) is accessible for t = (read | write)
* if t==write, value_regno is a register which value is stored into memory
* if t==read, value_regno is a register which will receive the value from memory
@@ -3363,6 +3425,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
} else if (reg->type == PTR_TO_BTF_ID) {
err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
value_regno);
+ } else if (reg->type == CONST_PTR_TO_MAP) {
+ err = check_ptr_to_map_access(env, regs, regno, off, size, t,
+ value_regno);
} else {
verbose(env, "R%d invalid mem access '%s'\n", regno,
reg_type_str[reg->type]);
@@ -3735,12 +3800,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type)
return -EINVAL;
}
-static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
- enum bpf_arg_type arg_type,
- struct bpf_call_arg_meta *meta)
+static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ struct bpf_call_arg_meta *meta,
+ const struct bpf_func_proto *fn)
{
+ u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
enum bpf_reg_type expected_type, type = reg->type;
+ enum bpf_arg_type arg_type = fn->arg_type[arg];
int err = 0;
if (arg_type == ARG_DONTCARE)
@@ -3820,9 +3887,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
expected_type = PTR_TO_BTF_ID;
if (type != expected_type)
goto err_type;
- if (reg->btf_id != meta->btf_id) {
- verbose(env, "Helper has type %s got %s in R%d\n",
- kernel_type_name(meta->btf_id),
+ if (!fn->check_btf_id) {
+ if (reg->btf_id != meta->btf_id) {
+ verbose(env, "Helper has type %s got %s in R%d\n",
+ kernel_type_name(meta->btf_id),
+ kernel_type_name(reg->btf_id), regno);
+
+ return -EACCES;
+ }
+ } else if (!fn->check_btf_id(reg->btf_id, arg)) {
+ verbose(env, "Helper does not support %s in R%d\n",
kernel_type_name(reg->btf_id), regno);
return -EACCES;
@@ -4644,10 +4718,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id;
/* check args */
for (i = 0; i < 5; i++) {
- err = btf_resolve_helper_id(&env->log, fn, i);
- if (err > 0)
- meta.btf_id = err;
- err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
+ if (!fn->check_btf_id) {
+ err = btf_resolve_helper_id(&env->log, fn, i);
+ if (err > 0)
+ meta.btf_id = err;
+ }
+ err = check_func_arg(env, i, &meta, fn);
if (err)
return err;
}
@@ -4750,6 +4826,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
regs[BPF_REG_0].mem_size = meta.mem_size;
+ } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
+ int ret_btf_id;
+
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
+ ret_btf_id = *fn->ret_btf_id;
+ if (ret_btf_id == 0) {
+ verbose(env, "invalid return type %d of func %s#%d\n",
+ fn->ret_type, func_id_name(func_id), func_id);
+ return -EINVAL;
+ }
+ regs[BPF_REG_0].btf_id = ret_btf_id;
} else {
verbose(env, "unknown return type %d of func %s#%d\n",
fn->ret_type, func_id_name(func_id), func_id);
@@ -4776,7 +4864,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (err)
return err;
- if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
+ if ((func_id == BPF_FUNC_get_stack ||
+ func_id == BPF_FUNC_get_task_stack) &&
+ !env->prog->has_callchain_buf) {
const char *err_str;
#ifdef CONFIG_PERF_EVENTS
@@ -5031,6 +5121,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops on pointers produce (meaningless) scalars */
+ if (opcode == BPF_SUB && env->allow_ptr_leaks) {
+ __mark_reg_unknown(env, dst_reg);
+ return 0;
+ }
+
verbose(env,
"R%d 32-bit pointer arithmetic prohibited\n",
dst);
@@ -10946,6 +11041,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->strict_alignment = false;
env->allow_ptr_leaks = bpf_allow_ptr_leaks();
+ env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
env->bypass_spec_v1 = bpf_bypass_spec_v1();
env->bypass_spec_v4 = bpf_bypass_spec_v4();
env->bpf_capable = bpf_capable();
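check_ptr_to_map_access() is what lets a privileged, BTF-enabled program dereference a map pointer (CONST_PTR_TO_MAP) directly instead of going through helpers, e.g. to read max_entries. A sketch of the program-side view in the style of CO-RE map_ptr tests (the trimmed struct bpf_map mirror and the attach point are assumptions):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* trimmed mirror of the kernel's struct bpf_map;
	 * preserve_access_index makes CO-RE relocate the field offset
	 * against the real vmlinux layout at load time */
	struct bpf_map {
		__u32 max_entries;
	} __attribute__((preserve_access_index));

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 16);
		__type(key, __u32);
		__type(value, __u64);
	} m_array SEC(".maps");

	SEC("tp/syscalls/sys_enter_getpid")
	int read_map_meta(void *ctx)
	{
		struct bpf_map *map = (struct bpf_map *)&m_array;

		/* this load is routed through check_ptr_to_map_access()
		 * -> btf_struct_access(); writes are rejected (-EACCES) */
		return map->max_entries == 16;
	}

Without CAP_PERFMON/CAP_SYS_ADMIN (env->allow_ptr_to_map_access) the load fails with -EPERM, and with -ENOTSUPP for any map type that never registered a map_btf_id.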
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 334d48b16c36..c6ce894e4ce9 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -149,7 +149,7 @@ void put_callchain_buffers(void)
}
}
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
int cpu;
struct callchain_cpus_entries *entries;
@@ -159,8 +159,10 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
return NULL;
entries = rcu_dereference(callchain_cpus_entries);
- if (!entries)
+ if (!entries) {
+ put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
return NULL;
+ }
cpu = smp_processor_id();
@@ -168,7 +170,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
(*rctx * perf_callchain_entry__sizeof()));
}
-static void
+void
put_callchain_entry(int rctx)
{
put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
@@ -183,11 +185,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
int rctx;
entry = get_callchain_entry(&rctx);
- if (rctx == -1)
- return NULL;
-
if (!entry)
- goto exit_put;
+ return NULL;
ctx.entry = entry;
ctx.max_stack = max_stack;
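Net effect of the callchain.c changes: get_callchain_entry() now owns the recursion context on failure, releasing it itself when the entries buffer is missing instead of returning NULL with the context still held (the old exit_put dance in get_perf_callchain()). Every caller, including the new get_callchain_entry_for_task() in stackmap.c above, reduces to the same pattern (sketch):

	entry = get_callchain_entry(&rctx);
	if (!entry)		/* recursion context already released */
		return NULL;
	/* ... fill entry->ip[] ... */
	put_callchain_entry(rctx);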
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 7bc3d6175868..e0b7775039ab 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -376,7 +376,7 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
/*
* Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
+ * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
*/
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
u64, arg2, u64, arg3)
@@ -420,6 +420,11 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
goto fmt_str;
}
+ if (fmt[i + 1] == 'B') {
+ i++;
+ goto fmt_next;
+ }
+
/* disallow any further format extensions */
if (fmt[i + 1] != 0 &&
!isspace(fmt[i + 1]) &&
@@ -636,7 +641,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
if (fmt[i] == 'p') {
if (fmt[i + 1] == 0 ||
fmt[i + 1] == 'K' ||
- fmt[i + 1] == 'x') {
+ fmt[i + 1] == 'x' ||
+ fmt[i + 1] == 'B') {
/* just kernel pointers */
params[fmt_cnt] = args[fmt_cnt];
fmt_cnt++;
@@ -681,7 +687,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
}
if (fmt[i] != 'i' && fmt[i] != 'd' &&
- fmt[i] != 'u' && fmt[i] != 'x') {
+ fmt[i] != 'u' && fmt[i] != 'x' &&
+ fmt[i] != 'X') {
err = -EINVAL;
goto out;
}
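With these hunks, %pB (print a kernel address as symbol+offset, the useful form for code addresses) is accepted by both bpf_trace_printk() and bpf_seq_printf(), and bpf_seq_printf() additionally accepts the %X integer specifier. An illustrative sketch of %pB from a kprobe program (attach point and helper macros assume <bpf/bpf_tracing.h> conventions):

	SEC("kprobe/do_nanosleep")
	int trace_entry(struct pt_regs *ctx)
	{
		const char fmt[] = "probed at %pB\n";

		/* %pB resolves the address against the kernel symbol table */
		bpf_trace_printk(fmt, sizeof(fmt), PT_REGS_IP(ctx));
		return 0;
	}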
@@ -1134,6 +1141,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_ringbuf_discard_proto;
case BPF_FUNC_ringbuf_query:
return &bpf_ringbuf_query_proto;
+ case BPF_FUNC_jiffies64:
+ return &bpf_jiffies64_proto;
+ case BPF_FUNC_get_task_stack:
+ return &bpf_get_task_stack_proto;
default:
return NULL;
}
@@ -1512,6 +1523,16 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skb_output_proto;
case BPF_FUNC_xdp_output:
return &bpf_xdp_output_proto;
+ case BPF_FUNC_skc_to_tcp6_sock:
+ return &bpf_skc_to_tcp6_sock_proto;
+ case BPF_FUNC_skc_to_tcp_sock:
+ return &bpf_skc_to_tcp_sock_proto;
+ case BPF_FUNC_skc_to_tcp_timewait_sock:
+ return &bpf_skc_to_tcp_timewait_sock_proto;
+ case BPF_FUNC_skc_to_tcp_request_sock:
+ return &bpf_skc_to_tcp_request_sock_proto;
+ case BPF_FUNC_skc_to_udp6_sock:
+ return &bpf_skc_to_udp6_sock_proto;
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?