author | Daniel Borkmann <daniel@iogearbox.net> | 2019-10-31 15:16:59 +0100
committer | Daniel Borkmann <daniel@iogearbox.net> | 2019-10-31 15:17:14 +0100
commit | 06087114606c416892bd67c5fde9f0d498afb287 (patch)
tree | 6193b759236e868ec2bdc75f9107a43123dbca13
parent | af91acbc62999d62e2ca1e80f660d20561ca55d3 (diff)
parent | 12a8654b2e5aab37b22c9608d008f9f0565862c0 (diff)
Merge branch 'bpf-cleanup-btf-raw-tp'
Alexei Starovoitov says:
====================
v1->v2: addressed Andrii's feedback
When BTF-enabled raw_tp programs were introduced, the plan was to follow
up with BTF-enabled kprobe and kretprobe support reusing the
PROG_RAW_TRACEPOINT and PROG_KPROBE types. But k[ret]probe programs
expect pt_regs, while a BTF-enabled program's ctx will be the same as
raw_tp's. A kretprobe is also indistinguishable from a kprobe, yet a
BTF-enabled kretprobe will have access to the return value while a
kprobe will not. Hence the PROG_KPROBE type is not reusable, and
reusing PROG_RAW_TRACEPOINT no longer fits well either.
Instead, introduce an 'umbrella' prog type BPF_PROG_TYPE_TRACING that
will cover the different BTF-enabled tracing attach points.
The changes make the libbpf side cleaner as well.
check_attach_btf_id() is cleaner too.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
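
To illustrate what the new program type is for, here is a rough BPF-side sketch. It is not part of this series; the kfree_skb tracepoint choice, the local ctx struct and the SEC() definition are illustrative assumptions modeled on the selftest conventions of the time. A BTF-enabled raw_tp program goes into the new "tp_btf/" section and may dereference pointer arguments directly, because the verifier checks ctx accesses against vmlinux BTF:

/* Hypothetical example, not from this series. */
#define SEC(name) __attribute__((section(name), used))

struct sk_buff;

/* The ctx of a BTF-enabled raw_tp is still the raw tracepoint arg array
 * (at most MAX_BPF_FUNC_ARGS == 12 u64s); the btf_trace_<name> typedef
 * in vmlinux BTF tells the verifier the real argument types.
 */
struct kfree_skb_ctx {
	struct sk_buff *skb;
	void *location;
};

SEC("tp_btf/kfree_skb")
int trace_kfree_skb(struct kfree_skb_ctx *ctx)
{
	struct sk_buff *skb = ctx->skb;

	/* Loads through ctx are checked via btf_ctx_access() for
	 * BPF_PROG_TYPE_TRACING programs, so skb is a typed BTF pointer
	 * rather than a plain u64. */
	return !skb;
}

char _license[] SEC("license") = "GPL";

libbpf maps the "tp_btf/kfree_skb" section to BPF_PROG_TYPE_TRACING with expected_attach_type BPF_TRACE_RAW_TP and resolves attach_btf_id from the btf_trace_kfree_skb typedef in vmlinux BTF, as the diff below shows.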
-rw-r--r-- | include/linux/bpf.h | 5
-rw-r--r-- | include/linux/bpf_types.h | 1
-rw-r--r-- | include/uapi/linux/bpf.h | 2
-rw-r--r-- | kernel/bpf/syscall.c | 6
-rw-r--r-- | kernel/bpf/verifier.c | 34
-rw-r--r-- | kernel/trace/bpf_trace.c | 44
-rw-r--r-- | tools/include/uapi/linux/bpf.h | 2
-rw-r--r-- | tools/lib/bpf/bpf.c | 8
-rw-r--r-- | tools/lib/bpf/bpf.h | 5
-rw-r--r-- | tools/lib/bpf/libbpf.c | 79
-rw-r--r-- | tools/lib/bpf/libbpf.h | 2
-rw-r--r-- | tools/lib/bpf/libbpf.map | 2
-rw-r--r-- | tools/lib/bpf/libbpf_probes.c | 1
13 files changed, 142 insertions, 49 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 171be30fe0ae..80158cff44bd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -373,6 +373,11 @@ enum bpf_cgroup_storage_type {
 
 #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
 
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
 struct bpf_prog_stats {
 	u64 cnt;
 	u64 nsecs;
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 36a9c2325176..de14872b01ba 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -26,6 +26,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
 BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
 BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
 BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing)
 #endif
 #ifdef CONFIG_CGROUP_BPF
 BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 4af8b0819a32..a6bf19dabaab 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -173,6 +173,7 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_CGROUP_SYSCTL,
 	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
 	BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	BPF_PROG_TYPE_TRACING,
 };
 
 enum bpf_attach_type {
@@ -199,6 +200,7 @@ enum bpf_attach_type {
 	BPF_CGROUP_UDP6_RECVMSG,
 	BPF_CGROUP_GETSOCKOPT,
 	BPF_CGROUP_SETSOCKOPT,
+	BPF_TRACE_RAW_TP,
 	__MAX_BPF_ATTACH_TYPE
 };
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ff5225759553..985d01ced196 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1571,7 +1571,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
 			   u32 btf_id)
 {
 	switch (prog_type) {
-	case BPF_PROG_TYPE_RAW_TRACEPOINT:
+	case BPF_PROG_TYPE_TRACING:
 		if (btf_id > BTF_MAX_TYPE)
 			return -EINVAL;
 		break;
@@ -1833,13 +1833,13 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 		return PTR_ERR(prog);
 
 	if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
+	    prog->type != BPF_PROG_TYPE_TRACING &&
 	    prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
 		err = -EINVAL;
 		goto out_put_prog;
 	}
 
-	if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT &&
-	    prog->aux->attach_btf_id) {
+	if (prog->type == BPF_PROG_TYPE_TRACING) {
 		if (attr->raw_tracepoint.name) {
 			/* raw_tp name should not be specified in raw_tp
 			 * programs that were verified via in-kernel BTF info
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6b0de04f8b91..2f2374967b36 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -9381,24 +9381,36 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog;
 	u32 btf_id = prog->aux->attach_btf_id;
+	const char prefix[] = "btf_trace_";
 	const struct btf_type *t;
 	const char *tname;
 
-	if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT && btf_id) {
-		const char prefix[] = "btf_trace_";
+	if (prog->type != BPF_PROG_TYPE_TRACING)
+		return 0;
 
-		t = btf_type_by_id(btf_vmlinux, btf_id);
-		if (!t) {
-			verbose(env, "attach_btf_id %u is invalid\n", btf_id);
-			return -EINVAL;
-		}
+	if (!btf_id) {
+		verbose(env, "Tracing programs must provide btf_id\n");
+		return -EINVAL;
+	}
+	t = btf_type_by_id(btf_vmlinux, btf_id);
+	if (!t) {
+		verbose(env, "attach_btf_id %u is invalid\n", btf_id);
+		return -EINVAL;
+	}
+	tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+	if (!tname) {
+		verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
+		return -EINVAL;
+	}
+
+	switch (prog->expected_attach_type) {
+	case BPF_TRACE_RAW_TP:
 		if (!btf_type_is_typedef(t)) {
 			verbose(env, "attach_btf_id %u is not a typedef\n",
 				btf_id);
 			return -EINVAL;
 		}
-		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
-		if (!tname || strncmp(prefix, tname, sizeof(prefix) - 1)) {
+		if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
 			verbose(env, "attach_btf_id %u points to wrong type name %s\n",
 				btf_id, tname);
 			return -EINVAL;
@@ -9419,8 +9431,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 		prog->aux->attach_func_name = tname;
 		prog->aux->attach_func_proto = t;
 		prog->aux->attach_btf_trace = true;
+		return 0;
+	default:
+		return -EINVAL;
 	}
-	return 0;
 }
 
 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 571c25d60710..f50bf19f7a05 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1055,10 +1055,6 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
 		return &bpf_perf_event_output_proto_raw_tp;
-#ifdef CONFIG_NET
-	case BPF_FUNC_skb_output:
-		return &bpf_skb_output_proto;
-#endif
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_raw_tp;
 	case BPF_FUNC_get_stack:
@@ -1068,20 +1064,44 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	}
 }
 
+static const struct bpf_func_proto *
+tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+	switch (func_id) {
+#ifdef CONFIG_NET
+	case BPF_FUNC_skb_output:
+		return &bpf_skb_output_proto;
+#endif
+	default:
+		return raw_tp_prog_func_proto(func_id, prog);
+	}
+}
+
 static bool raw_tp_prog_is_valid_access(int off, int size,
 					enum bpf_access_type type,
 					const struct bpf_prog *prog,
 					struct bpf_insn_access_aux *info)
 {
-	/* largest tracepoint in the kernel has 12 args */
-	if (off < 0 || off >= sizeof(__u64) * 12)
+	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+	return true;
+}
+
+static bool tracing_prog_is_valid_access(int off, int size,
+					 enum bpf_access_type type,
+					 const struct bpf_prog *prog,
+					 struct bpf_insn_access_aux *info)
+{
+	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
 		return false;
 	if (type != BPF_READ)
 		return false;
 	if (off % size != 0)
 		return false;
-	if (!prog->aux->attach_btf_id)
-		return true;
 	return btf_ctx_access(off, size, type, prog, info);
 }
 
@@ -1093,6 +1113,14 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
 const struct bpf_prog_ops raw_tracepoint_prog_ops = {
 };
 
+const struct bpf_verifier_ops tracing_verifier_ops = {
+	.get_func_proto = tracing_prog_func_proto,
+	.is_valid_access = tracing_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracing_prog_ops = {
+};
+
 static bool raw_tp_writable_prog_is_valid_access(int off, int size,
 						 enum bpf_access_type type,
 						 const struct bpf_prog *prog,
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4af8b0819a32..a6bf19dabaab 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -173,6 +173,7 @@ enum bpf_prog_type {
 	BPF_PROG_TYPE_CGROUP_SYSCTL,
 	BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
 	BPF_PROG_TYPE_CGROUP_SOCKOPT,
+	BPF_PROG_TYPE_TRACING,
 };
 
 enum bpf_attach_type {
@@ -199,6 +200,7 @@ enum bpf_attach_type {
 	BPF_CGROUP_UDP6_RECVMSG,
 	BPF_CGROUP_GETSOCKOPT,
 	BPF_CGROUP_SETSOCKOPT,
+	BPF_TRACE_RAW_TP,
 	__MAX_BPF_ATTACH_TYPE
 };
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 79046067720f..ca0d635b1d5e 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -228,9 +228,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_type = load_attr->prog_type;
 	attr.expected_attach_type = load_attr->expected_attach_type;
-	if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT)
-		/* expected_attach_type is ignored for tracing progs */
-		attr.attach_btf_id = attr.expected_attach_type;
+	if (attr.prog_type == BPF_PROG_TYPE_TRACING)
+		attr.attach_btf_id = load_attr->attach_btf_id;
+	else
+		attr.prog_ifindex = load_attr->prog_ifindex;
 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
 	attr.insns = ptr_to_u64(load_attr->insns);
 	attr.license = ptr_to_u64(load_attr->license);
@@ -245,7 +246,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	}
 
 	attr.kern_version = load_attr->kern_version;
-	attr.prog_ifindex = load_attr->prog_ifindex;
 	attr.prog_btf_fd = load_attr->prog_btf_fd;
 	attr.func_info_rec_size = load_attr->func_info_rec_size;
 	attr.func_info_cnt = load_attr->func_info_cnt;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 0db01334740f..1c53bc5b4b3c 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -78,7 +78,10 @@ struct bpf_load_program_attr {
 	size_t insns_cnt;
 	const char *license;
 	__u32 kern_version;
-	__u32 prog_ifindex;
+	union {
+		__u32 prog_ifindex;
+		__u32 attach_btf_id;
+	};
 	__u32 prog_btf_fd;
 	__u32 func_info_rec_size;
 	const void *func_info;
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5d15cc4dfcd6..c80f316f1320 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -188,6 +188,7 @@ struct bpf_program {
 	bpf_program_clear_priv_t clear_priv;
 
 	enum bpf_attach_type expected_attach_type;
+	__u32 attach_btf_id;
 	void *func_info;
 	__u32 func_info_rec_size;
 	__u32 func_info_cnt;
@@ -3446,6 +3447,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.line_info_cnt = prog->line_info_cnt;
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
+	load_attr.attach_btf_id = prog->attach_btf_id;
 
 retry_load:
 	log_buf = malloc(log_buf_size);
@@ -3607,6 +3609,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
+static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
+
 static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 		   struct bpf_object_open_opts *opts)
@@ -3656,6 +3660,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	bpf_object__for_each_program(prog, obj) {
 		enum bpf_prog_type prog_type;
 		enum bpf_attach_type attach_type;
+		__u32 btf_id;
 
 		err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
 					       &attach_type);
@@ -3667,6 +3672,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 
 		bpf_program__set_type(prog, prog_type);
 		bpf_program__set_expected_attach_type(prog, attach_type);
+		if (prog_type == BPF_PROG_TYPE_TRACING) {
+			err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id);
+			if (err)
+				goto out;
+			prog->attach_btf_id = btf_id;
+		}
 	}
 
 	return obj;
@@ -4518,6 +4529,7 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
+BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
 
 enum bpf_attach_type
 bpf_program__get_expected_attach_type(struct bpf_program *prog)
@@ -4546,7 +4558,8 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
 	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
 
 /* Programs that use BTF to identify attach point */
-#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0)
+#define BPF_PROG_BTF(string, ptype, eatype) \
+	BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
 
 /* Programs that can be attached but attach type can't be identified by section
  * name. Kept for backward compatibility.
@@ -4573,7 +4586,8 @@ static const struct {
 	BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT),
 	BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
 	BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT),
-	BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT),
+	BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING,
+		     BPF_TRACE_RAW_TP),
 	BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
 	BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
 	BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
@@ -4678,27 +4692,6 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 			continue;
 		*prog_type = section_names[i].prog_type;
 		*expected_attach_type = section_names[i].expected_attach_type;
-		if (section_names[i].is_attach_btf) {
-			struct btf *btf = bpf_core_find_kernel_btf();
-			char raw_tp_btf_name[128] = "btf_trace_";
-			char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1;
-			int ret;
-
-			if (IS_ERR(btf)) {
-				pr_warn("vmlinux BTF is not found\n");
-				return -EINVAL;
-			}
-			/* prepend "btf_trace_" prefix per kernel convention */
-			strncat(dst, name + section_names[i].len,
-				sizeof(raw_tp_btf_name) - sizeof("btf_trace_"));
-			ret = btf__find_by_name(btf, raw_tp_btf_name);
-			btf__free(btf);
-			if (ret <= 0) {
-				pr_warn("%s is not found in vmlinux BTF\n", dst);
-				return -EINVAL;
-			}
-			*expected_attach_type = ret;
-		}
 		return 0;
 	}
 	pr_warn("failed to guess program type based on ELF section name '%s'\n", name);
@@ -4711,6 +4704,46 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 	return -ESRCH;
 }
 
+#define BTF_PREFIX "btf_trace_"
+static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
+{
+	struct btf *btf = bpf_core_find_kernel_btf();
+	char raw_tp_btf_name[128] = BTF_PREFIX;
+	char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
+	int ret, i, err = -EINVAL;
+
+	if (IS_ERR(btf)) {
+		pr_warn("vmlinux BTF is not found\n");
+		return -EINVAL;
+	}
+
+	if (!name)
+		goto out;
+
+	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
+		if (!section_names[i].is_attach_btf)
+			continue;
+		if (strncmp(name, section_names[i].sec, section_names[i].len))
+			continue;
+		/* prepend "btf_trace_" prefix per kernel convention */
+		strncat(dst, name + section_names[i].len,
+			sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
+		ret = btf__find_by_name(btf, raw_tp_btf_name);
+		if (ret <= 0) {
+			pr_warn("%s is not found in vmlinux BTF\n", dst);
+			goto out;
+		}
+		*btf_id = ret;
+		err = 0;
+		goto out;
+	}
+	pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
+	err = -ESRCH;
+out:
+	btf__free(btf);
+	return err;
+}
+
 int libbpf_attach_type_by_name(const char *name,
 			       enum bpf_attach_type *attach_type)
 {
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c63e2ff84abc..2b126ee5e173 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -307,6 +307,7 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
 LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
@@ -326,6 +327,7 @@ LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
 
 /*
  * No need for __attribute__((packed)), all members of 'bpf_map_def'
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index d1473ea4d7a5..69dded5af00b 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -197,4 +197,6 @@ LIBBPF_0.0.6 {
 		bpf_object__open_mem;
 		bpf_program__get_expected_attach_type;
 		bpf_program__get_type;
+		bpf_program__is_tracing;
+		bpf_program__set_tracing;
 } LIBBPF_0.0.5;
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 4b0b0364f5fc..a9eb8b322671 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -102,6 +102,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+	case BPF_PROG_TYPE_TRACING:
 	default:
 		break;
 	}
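
For reference, a minimal user-space sketch of loading and attaching such a program through the libbpf APIs touched above; the object file name is hypothetical and error reporting is omitted. The attach call passes no tracepoint name: for BPF_PROG_TYPE_TRACING programs the kernel derives the target from attach_btf_id, and supplying a name is rejected.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int fd;

	/* open() resolves the "tp_btf/" section name: prog type, expected
	 * attach type and attach_btf_id are filled in before load */
	obj = bpf_object__open("raw_tp_btf.o");	/* hypothetical object file */
	if (libbpf_get_error(obj))
		return 1;
	if (bpf_object__load(obj))
		return 1;

	bpf_object__for_each_program(prog, obj) {
		/* NULL name: the tracepoint comes from the program's
		 * attach_btf_id resolved at open time */
		fd = bpf_raw_tracepoint_open(NULL, bpf_program__fd(prog));
		if (fd < 0)
			return 1;
		/* keep fd open for as long as the program should stay attached */
	}
	return 0;
}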