| author | Jiri Olsa <jolsa@kernel.org> | 2023-09-20 23:31:40 +0200 |
|---|---|---|
| committer | Andrii Nakryiko <andrii@kernel.org> | 2023-09-25 16:37:44 -0700 |
| commit | dd8657894c11b03c6eb0fd53fe9d7fec2072d18b | |
| tree | c6c5d419041eae87c27d6c164c237a08589a8994 /include | |
| parent | 3acf8ace68230e9558cf916847f1cc9f208abdf1 | |
bpf: Count missed stats in trace_call_bpf
Increase the misses stat when bpf prog array execution is skipped
because of the recursion check in trace_call_bpf.

Add bpf_prog_inc_misses_counters, which increases the misses
count for every bpf program in a bpf_prog_array.
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Reviewed-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/bpf/20230920213145.1941596-5-jolsa@kernel.org
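The caller-side change is not part of this diffstat (limited to 'include'), but the new helper is meant to be called where trace_call_bpf bails out on recursion. A minimal sketch of that call site follows; the surrounding function body is assumed context, not taken from this diff:

```c
/* Sketch of the intended call site in kernel/trace/bpf_trace.c.
 * The body shown here is assumed context, not part of this diff
 * (which is limited to 'include'). */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * The recursion check fired and the prog array is not
		 * executed, so record a miss for every program in it.
		 */
		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
		ret = 0;
		goto out;
	}

	ret = bpf_prog_run_array(rcu_dereference(call->prog_array), ctx,
				 bpf_prog_run);

 out:
	__this_cpu_dec(bpf_prog_active);
	return ret;
}
```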
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/bpf.h | 16 |
1 file changed, 16 insertions, 0 deletions
```diff
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 30063a760b5a..a82efd34b741 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2922,6 +2922,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
 #endif /* CONFIG_BPF_SYSCALL */
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+	const struct bpf_prog_array_item *item;
+	struct bpf_prog *prog;
+
+	if (unlikely(!array))
+		return;
+
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		bpf_prog_inc_misses_counter(prog);
+		item++;
+	}
+}
+
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 void bpf_sk_reuseport_detach(struct sock *sk);
 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
```
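The helper walks the NULL-terminated items array with READ_ONCE, mirroring how bpf_prog_run_array iterates the same structure. The misses it accumulates surface to userspace in the recursion_misses field of struct bpf_prog_info; a minimal sketch of reading it with libbpf's bpf_obj_get_info_by_fd, assuming the program fd is already at hand:

```c
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

/* Print how often a program was skipped and counted as a miss
 * (e.g. by the trace_call_bpf recursion check above). Sketch only:
 * prog_fd is assumed to come from elsewhere, e.g. bpf_prog_get_fd_by_id(). */
static int print_recursion_misses(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));
	err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;

	printf("prog id %u: recursion_misses %llu\n",
	       info.id, (unsigned long long)info.recursion_misses);
	return 0;
}
```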