Diffstat (limited to 'tools/testing/selftests/bpf/progs')
21 files changed, 1637 insertions, 26 deletions
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 4a01ea9113bf..14e28f991451 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -7,6 +7,13 @@ #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) +/* Convenience macros for use with 'asm volatile' blocks */ +#define __naked __attribute__((naked)) +#define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory" +#define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory" +#define __imm(name) [name]"i"(name) +#define __imm_addr(name) [name]"i"(&name) + #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__x64_" @@ -21,4 +28,29 @@ #define SYS_PREFIX "__se_" #endif +/* How many arguments are passed to a function in registers */ +#if defined(__TARGET_ARCH_x86) || defined(__x86_64__) +#define FUNC_REG_ARG_CNT 6 +#elif defined(__i386__) +#define FUNC_REG_ARG_CNT 3 +#elif defined(__TARGET_ARCH_s390) || defined(__s390x__) +#define FUNC_REG_ARG_CNT 5 +#elif defined(__TARGET_ARCH_arm) || defined(__arm__) +#define FUNC_REG_ARG_CNT 4 +#elif defined(__TARGET_ARCH_arm64) || defined(__aarch64__) +#define FUNC_REG_ARG_CNT 8 +#elif defined(__TARGET_ARCH_mips) || defined(__mips__) +#define FUNC_REG_ARG_CNT 8 +#elif defined(__TARGET_ARCH_powerpc) || defined(__powerpc__) || defined(__powerpc64__) +#define FUNC_REG_ARG_CNT 8 +#elif defined(__TARGET_ARCH_sparc) || defined(__sparc__) +#define FUNC_REG_ARG_CNT 6 +#elif defined(__TARGET_ARCH_riscv) || defined(__riscv__) +#define FUNC_REG_ARG_CNT 8 +#else +/* default to 5 for others */ +#define FUNC_REG_ARG_CNT 5 +#endif + + #endif diff --git a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c index e1e11897e99b..1a476d8ed354 100644 --- a/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c +++ b/tools/testing/selftests/bpf/progs/bpf_syscall_macro.c @@ -81,4 +81,30 @@ int BPF_KSYSCALL(prctl_enter, int option, unsigned long arg2, return 0; } +__u64 splice_fd_in; +__u64 splice_off_in; +__u64 splice_fd_out; +__u64 splice_off_out; +__u64 splice_len; +__u64 splice_flags; + +SEC("ksyscall/splice") +int BPF_KSYSCALL(splice_enter, int fd_in, loff_t *off_in, int fd_out, + loff_t *off_out, size_t len, unsigned int flags) +{ + pid_t pid = bpf_get_current_pid_tgid() >> 32; + + if (pid != filter_pid) + return 0; + + splice_fd_in = fd_in; + splice_off_in = (__u64)off_in; + splice_fd_out = fd_out; + splice_off_out = (__u64)off_out; + splice_len = len; + splice_flags = flags; + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h new file mode 100644 index 000000000000..ad34f3b602be --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _CPUMASK_COMMON_H +#define _CPUMASK_COMMON_H + +#include "errno.h" +#include <stdbool.h> + +int err; + +struct __cpumask_map_value { + struct bpf_cpumask __kptr_ref * cpumask; +}; + +struct array_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct __cpumask_map_value); + __uint(max_entries, 1); +} __cpumask_map SEC(".maps"); + +struct bpf_cpumask *bpf_cpumask_create(void) __ksym; +void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym; +u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; +u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; +bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_and(struct bpf_cpumask *cpumask, + const struct cpumask *src1, + const struct cpumask *src2) __ksym; +void bpf_cpumask_or(struct bpf_cpumask *cpumask, + const struct cpumask *src1, + const struct cpumask *src2) __ksym; +void bpf_cpumask_xor(struct bpf_cpumask *cpumask, + const struct cpumask *src1, + const struct cpumask *src2) __ksym; +bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym; +bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym; +bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym; +bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; +bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; +u32 bpf_cpumask_any(const struct cpumask *src) __ksym; +u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; + +static inline const struct cpumask *cast(struct bpf_cpumask *cpumask) +{ + return (const struct cpumask *)cpumask; +} + +static inline struct bpf_cpumask *create_cpumask(void) +{ + struct bpf_cpumask *cpumask; + + cpumask = bpf_cpumask_create(); + if (!cpumask) { + err = 1; + return NULL; + } + + if (!bpf_cpumask_empty(cast(cpumask))) { + err = 2; + bpf_cpumask_release(cpumask); + return NULL; + } + + return cpumask; +} + +static inline struct __cpumask_map_value *cpumask_map_value_lookup(void) +{ + u32 key = 0; + + return bpf_map_lookup_elem(&__cpumask_map, &key); +} + +static inline int cpumask_map_insert(struct bpf_cpumask *mask) +{ + struct __cpumask_map_value local, *v; + long status; + struct bpf_cpumask *old; + u32 key = 0; + + local.cpumask = NULL; + status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0); + if (status) { + bpf_cpumask_release(mask); + return status; + } + + v = bpf_map_lookup_elem(&__cpumask_map, &key); + if (!v) { + bpf_cpumask_release(mask); + return -ENOENT; + } + + old = bpf_kptr_xchg(&v->cpumask, mask); + if (old) { + bpf_cpumask_release(old); + return -EEXIST; + } + + return 0; +} + +#endif /* _CPUMASK_COMMON_H */ diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c 
b/tools/testing/selftests/bpf/progs/cpumask_failure.c new file mode 100644 index 000000000000..33e8e86dd090 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#include "cpumask_common.h" + +char _license[] SEC("license") = "GPL"; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(task_newtask, + * TP_PROTO(struct task_struct *p, u64 clone_flags) + */ + +SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") +int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + cpumask = create_cpumask(); + + /* cpumask is never released. */ + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("NULL pointer passed to trusted arg0") +int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + cpumask = create_cpumask(); + + /* cpumask is released twice. */ + bpf_cpumask_release(cpumask); + bpf_cpumask_release(cpumask); + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask") +int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + /* Can't acquire a non-struct bpf_cpumask. */ + cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask") +int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + /* Can't set the CPU of a non-struct bpf_cpumask. */ + bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") +int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + struct __cpumask_map_value *v; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (cpumask_map_insert(cpumask)) + return 0; + + v = cpumask_map_value_lookup(); + if (!v) + return 0; + + cpumask = bpf_kptr_xchg(&v->cpumask, NULL); + + /* cpumask is never released. */ + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") +int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + struct __cpumask_map_value *v; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (cpumask_map_insert(cpumask)) + return 0; + + v = cpumask_map_value_lookup(); + if (!v) + return 0; + + cpumask = bpf_cpumask_kptr_get(&v->cpumask); + + /* cpumask is never released. */ + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("NULL pointer passed to trusted arg0") +int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) +{ + /* NULL passed to KF_TRUSTED_ARGS kfunc. 
*/ + bpf_cpumask_empty(NULL); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c new file mode 100644 index 000000000000..1d38bc65d4b0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "cpumask_common.h" + +char _license[] SEC("license") = "GPL"; + +int pid, nr_cpus; + +static bool is_test_task(void) +{ + int cur_pid = bpf_get_current_pid_tgid() >> 32; + + return pid == cur_pid; +} + +static bool create_cpumask_set(struct bpf_cpumask **out1, + struct bpf_cpumask **out2, + struct bpf_cpumask **out3, + struct bpf_cpumask **out4) +{ + struct bpf_cpumask *mask1, *mask2, *mask3, *mask4; + + mask1 = create_cpumask(); + if (!mask1) + return false; + + mask2 = create_cpumask(); + if (!mask2) { + bpf_cpumask_release(mask1); + err = 3; + return false; + } + + mask3 = create_cpumask(); + if (!mask3) { + bpf_cpumask_release(mask1); + bpf_cpumask_release(mask2); + err = 4; + return false; + } + + mask4 = create_cpumask(); + if (!mask4) { + bpf_cpumask_release(mask1); + bpf_cpumask_release(mask2); + bpf_cpumask_release(mask3); + err = 5; + return false; + } + + *out1 = mask1; + *out2 = mask2; + *out3 = mask3; + *out4 = mask4; + + return true; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + if (!is_test_task()) + return 0; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + bpf_cpumask_release(cpumask); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + if (!is_test_task()) + return 0; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + bpf_cpumask_set_cpu(0, cpumask); + if (!bpf_cpumask_test_cpu(0, cast(cpumask))) { + err = 3; + goto release_exit; + } + + bpf_cpumask_clear_cpu(0, cpumask); + if (bpf_cpumask_test_cpu(0, cast(cpumask))) { + err = 4; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(cpumask); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + if (!is_test_task()) + return 0; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + bpf_cpumask_setall(cpumask); + if (!bpf_cpumask_full(cast(cpumask))) { + err = 3; + goto release_exit; + } + + bpf_cpumask_clear(cpumask); + if (!bpf_cpumask_empty(cast(cpumask))) { + err = 4; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(cpumask); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + if (!is_test_task()) + return 0; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) { + err = 3; + goto release_exit; + } + + if (bpf_cpumask_first_zero(cast(cpumask)) != 0) { + bpf_printk("first zero: %d", bpf_cpumask_first_zero(cast(cpumask))); + err = 4; + goto release_exit; + } + + bpf_cpumask_set_cpu(0, cpumask); + if (bpf_cpumask_first(cast(cpumask)) != 0) { + err = 5; + goto release_exit; + } + + if (bpf_cpumask_first_zero(cast(cpumask)) != 1) { + err = 6; + goto release_exit; + } + 
+release_exit: + bpf_cpumask_release(cpumask); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + + if (!is_test_task()) + return 0; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (bpf_cpumask_test_and_set_cpu(0, cpumask)) { + err = 3; + goto release_exit; + } + + if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) { + err = 4; + goto release_exit; + } + + if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) { + err = 5; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(cpumask); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; + + if (!is_test_task()) + return 0; + + if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) + return 0; + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + + if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) { + err = 6; + goto release_exit; + } + if (!bpf_cpumask_empty(cast(dst1))) { + err = 7; + goto release_exit; + } + + bpf_cpumask_or(dst1, cast(mask1), cast(mask2)); + if (!bpf_cpumask_test_cpu(0, cast(dst1))) { + err = 8; + goto release_exit; + } + if (!bpf_cpumask_test_cpu(1, cast(dst1))) { + err = 9; + goto release_exit; + } + + bpf_cpumask_xor(dst2, cast(mask1), cast(mask2)); + if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) { + err = 10; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(mask1); + bpf_cpumask_release(mask2); + bpf_cpumask_release(dst1); + bpf_cpumask_release(dst2); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; + + if (!is_test_task()) + return 0; + + if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) + return 0; + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) { + err = 6; + goto release_exit; + } + + bpf_cpumask_or(dst1, cast(mask1), cast(mask2)); + if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) { + err = 7; + goto release_exit; + } + + if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) { + err = 8; + goto release_exit; + } + + if (bpf_cpumask_subset(cast(dst1), cast(mask1))) { + err = 9; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(mask1); + bpf_cpumask_release(mask2); + bpf_cpumask_release(dst1); + bpf_cpumask_release(dst2); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2, *dst1, *dst2; + u32 cpu; + + if (!is_test_task()) + return 0; + + if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2)) + return 0; + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + bpf_cpumask_or(dst1, cast(mask1), cast(mask2)); + + cpu = bpf_cpumask_any(cast(mask1)); + if (cpu != 0) { + err = 6; + goto release_exit; + } + + cpu = bpf_cpumask_any(cast(dst2)); + if (cpu < nr_cpus) { + err = 7; + goto release_exit; + } + + bpf_cpumask_copy(dst2, cast(dst1)); + if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) { + err = 8; + goto release_exit; + } + + cpu = bpf_cpumask_any(cast(dst2)); + if (cpu > 1) { + err = 9; + goto release_exit; + } + + cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2)); + if (cpu < nr_cpus) { + err = 10; + goto release_exit; + } + +release_exit: + bpf_cpumask_release(mask1); + 
bpf_cpumask_release(mask2); + bpf_cpumask_release(dst1); + bpf_cpumask_release(dst2); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + struct __cpumask_map_value *v; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (cpumask_map_insert(cpumask)) + err = 3; + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + struct __cpumask_map_value *v; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (cpumask_map_insert(cpumask)) { + err = 3; + return 0; + } + + v = cpumask_map_value_lookup(); + if (!v) { + err = 4; + return 0; + } + + cpumask = bpf_kptr_xchg(&v->cpumask, NULL); + if (cpumask) + bpf_cpumask_release(cpumask); + else + err = 5; + + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *cpumask; + struct __cpumask_map_value *v; + + cpumask = create_cpumask(); + if (!cpumask) + return 0; + + if (cpumask_map_insert(cpumask)) { + err = 3; + return 0; + } + + v = cpumask_map_value_lookup(); + if (!v) { + err = 4; + return 0; + } + + cpumask = bpf_cpumask_kptr_get(&v->cpumask); + if (cpumask) + bpf_cpumask_release(cpumask); + else + err = 5; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c new file mode 100644 index 000000000000..0bf969a0b5ed --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops.s/test_2") +__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops") +int BPF_PROG(test_unsupported_field_sleepable, + struct bpf_dummy_ops_state *state, int a1, unsigned short a2, + char a3, unsigned long a4) +{ + /* Tries to mark an unsleepable field in struct bpf_dummy_ops as sleepable. */ + return 0; +} + +SEC(".struct_ops") +struct bpf_dummy_ops dummy_1 = { + .test_1 = NULL, + .test_2 = (void *)test_unsupported_field_sleepable, + .test_sleepable = (void *)NULL, +}; diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c index ead87edb75e2..1efa746c25dc 100644 --- a/tools/testing/selftests/bpf/progs/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c @@ -1,19 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2021. 
Huawei Technologies Co., Ltd */ -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -struct bpf_dummy_ops_state { - int val; -} __attribute__((preserve_access_index)); - -struct bpf_dummy_ops { - int (*test_1)(struct bpf_dummy_ops_state *state); - int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2, - char a3, unsigned long a4); -}; - char _license[] SEC("license") = "GPL"; SEC("struct_ops/test_1") @@ -43,8 +33,15 @@ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a return 0; } +SEC("struct_ops.s/test_sleepable") +int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state) +{ + return 0; +} + SEC(".struct_ops") struct bpf_dummy_ops dummy_1 = { .test_1 = (void *)test_1, .test_2 = (void *)test_2, + .test_sleepable = (void *)test_sleepable, }; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 78debc1b3820..5950ad6ec2e6 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -35,6 +35,13 @@ struct { __type(value, __u32); } array_map3 SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} array_map4 SEC(".maps"); + struct sample { int pid; long value; @@ -67,7 +74,7 @@ static int get_map_val_dynptr(struct bpf_dynptr *ptr) * bpf_ringbuf_submit/discard_dynptr call */ SEC("?raw_tp") -__failure __msg("Unreleased reference id=1") +__failure __msg("Unreleased reference id=2") int ringbuf_missing_release1(void *ctx) { struct bpf_dynptr ptr; @@ -80,7 +87,7 @@ int ringbuf_missing_release1(void *ctx) } SEC("?raw_tp") -__failure __msg("Unreleased reference id=2") +__failure __msg("Unreleased reference id=4") int ringbuf_missing_release2(void *ctx) { struct bpf_dynptr ptr1, ptr2; @@ -382,7 +389,7 @@ int invalid_helper1(void *ctx) /* A dynptr can't be passed into a helper function at a non-zero offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("cannot pass in dynptr at an offset=-8") int invalid_helper2(void *ctx) { struct bpf_dynptr ptr; @@ -420,7 +427,7 @@ int invalid_write1(void *ctx) * offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write2(void *ctx) { struct bpf_dynptr ptr; @@ -444,7 +451,7 @@ int invalid_write2(void *ctx) * non-const offset */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write3(void *ctx) { struct bpf_dynptr ptr; @@ -476,7 +483,7 @@ static int invalid_write4_callback(__u32 index, void *data) * be invalidated as a dynptr */ SEC("?raw_tp") -__failure __msg("arg 1 is an unacquired reference") +__failure __msg("cannot overwrite referenced dynptr") int invalid_write4(void *ctx) { struct bpf_dynptr ptr; @@ -584,7 +591,7 @@ int invalid_read4(void *ctx) /* Initializing a dynptr on an offset should fail */ SEC("?raw_tp") -__failure __msg("invalid write to stack") +__failure __msg("cannot pass in dynptr at an offset=0") int invalid_offset(void *ctx) { struct bpf_dynptr ptr; @@ -653,3 +660,435 @@ int dynptr_from_mem_invalid_api(void *ctx) return 0; } + +SEC("?tc") +__failure __msg("cannot overwrite referenced dynptr") __log_level(2) +int dynptr_pruning_overwrite(struct __sk_buff *ctx) +{ + asm volatile ( + "r9 = 0xeB9F; \ + r6 = 
%[ringbuf] ll; \ + r1 = r6; \ + r2 = 8; \ + r3 = 0; \ + r4 = r10; \ + r4 += -16; \ + call %[bpf_ringbuf_reserve_dynptr]; \ + if r0 == 0 goto pjmp1; \ + goto pjmp2; \ + pjmp1: \ + *(u64 *)(r10 - 16) = r9; \ + pjmp2: \ + r1 = r10; \ + r1 += -16; \ + r2 = 0; \ + call %[bpf_ringbuf_discard_dynptr]; " + : + : __imm(bpf_ringbuf_reserve_dynptr), + __imm(bpf_ringbuf_discard_dynptr), + __imm_addr(ringbuf) + : __clobber_all + ); + return 0; +} + +SEC("?tc") +__success __msg("12: safe") __log_level(2) +int dynptr_pruning_stacksafe(struct __sk_buff *ctx) +{ + asm volatile ( + "r9 = 0xeB9F; \ + r6 = %[ringbuf] ll; \ + r1 = r6; \ + r2 = 8; \ + r3 = 0; \ + r4 = r10; \ + r4 += -16; \ + call %[bpf_ringbuf_reserve_dynptr]; \ + if r0 == 0 goto stjmp1; \ + goto stjmp2; \ + stjmp1: \ + r9 = r9; \ + stjmp2: \ + r1 = r10; \ + r1 += -16; \ + r2 = 0; \ + call %[bpf_ringbuf_discard_dynptr]; " + : + : __imm(bpf_ringbuf_reserve_dynptr), + __imm(bpf_ringbuf_discard_dynptr), + __imm_addr(ringbuf) + : __clobber_all + ); + return 0; +} + +SEC("?tc") +__failure __msg("cannot overwrite referenced dynptr") __log_level(2) +int dynptr_pruning_type_confusion(struct __sk_buff *ctx) +{ + asm volatile ( + "r6 = %[array_map4] ll; \ + r7 = %[ringbuf] ll; \ + r1 = r6; \ + r2 = r10; \ + r2 += -8; \ + r9 = 0; \ + *(u64 *)(r2 + 0) = r9; \ + r3 = r10; \ + r3 += -24; \ + r9 = 0xeB9FeB9F; \ + *(u64 *)(r10 - 16) = r9; \ + *(u64 *)(r10 - 24) = r9; \ + r9 = 0; \ + r4 = 0; \ + r8 = r2; \ + call %[bpf_map_update_elem]; \ + r1 = r6; \ + r2 = r8; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto tjmp1; \ + exit; \ + tjmp1: \ + r8 = r0; \ + r1 = r7; \ + r2 = 8; \ + r3 = 0; \ + r4 = r10; \ + r4 += -16; \ + r0 = *(u64 *)(r0 + 0); \ + call %[bpf_ringbuf_reserve_dynptr]; \ + if r0 == 0 goto tjmp2; \ + r8 = r8; \ + r8 = r8; \ + r8 = r8; \ + r8 = r8; \ + r8 = r8; \ + r8 = r8; \ + r8 = r8; \ + goto tjmp3; \ + tjmp2: \ + *(u64 *)(r10 - 8) = r9; \ + *(u64 *)(r10 - 16) = r9; \ + r1 = r8; \ + r1 += 8; \ + r2 = 0; \ + r3 = 0; \ + r4 = r10; \ + r4 += -16; \ + call %[bpf_dynptr_from_mem]; \ + tjmp3: \ + r1 = r10; \ + r1 += -16; \ + r2 = 0; \ + call %[bpf_ringbuf_discard_dynptr]; " + : + : __imm(bpf_map_update_elem), + __imm(bpf_map_lookup_elem), + __imm(bpf_ringbuf_reserve_dynptr), + __imm(bpf_dynptr_from_mem), + __imm(bpf_ringbuf_discard_dynptr), + __imm_addr(array_map4), + __imm_addr(ringbuf) + : __clobber_all + ); + return 0; +} + +SEC("?tc") +__failure __msg("dynptr has to be at a constant offset") __log_level(2) +int dynptr_var_off_overwrite(struct __sk_buff *ctx) +{ + asm volatile ( + "r9 = 16; \ + *(u32 *)(r10 - 4) = r9; \ + r8 = *(u32 *)(r10 - 4); \ + if r8 >= 0 goto vjmp1; \ + r0 = 1; \ + exit; \ + vjmp1: \ + if r8 <= 16 goto vjmp2; \ + r0 = 1; \ + exit; \ + vjmp2: \ + r8 &= 16; \ + r1 = %[ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + r4 = r10; \ + r4 += -32; \ + r4 += r8; \ + call %[bpf_ringbuf_reserve_dynptr]; \ + r9 = 0xeB9F; \ + *(u64 *)(r10 - 16) = r9; \ + r1 = r10; \ + r1 += -32; \ + r1 += r8; \ + r2 = 0; \ + call %[bpf_ringbuf_discard_dynptr]; " + : + : __imm(bpf_ringbuf_reserve_dynptr), + __imm(bpf_ringbuf_discard_dynptr), + __imm_addr(ringbuf) + : __clobber_all + ); + return 0; +} + +SEC("?tc") +__failure __msg("cannot overwrite referenced dynptr") __log_level(2) +int dynptr_partial_slot_invalidate(struct __sk_buff *ctx) +{ + asm volatile ( + "r6 = %[ringbuf] ll; \ + r7 = %[array_map4] ll; \ + r1 = r7; \ + r2 = r10; \ + r2 += -8; \ + r9 = 0; \ + *(u64 *)(r2 + 0) = r9; \ + r3 = r2; \ + r4 = 0; \ + r8 = r2; \ + call %[bpf_map_update_elem]; \ + r1 
= r7; \ r2 = r8; \ call %[bpf_map_lookup_elem]; \ if r0 != 0 goto sjmp1; \ exit; \ sjmp1: \ r7 = r0; \ r1 = r6; \ r2 = 8; \ r3 = 0; \ r4 = r10; \ r4 += -24; \ call %[bpf_ringbuf_reserve_dynptr]; \ *(u64 *)(r10 - 16) = r9; \ r1 = r7; \ r2 = 8; \ r3 = 0; \ r4 = r10; \ r4 += -16; \ call %[bpf_dynptr_from_mem]; \ r1 = r10; \ r1 += -512; \ r2 = 488; \ r3 = r10; \ r3 += -24; \ r4 = 0; \ r5 = 0; \ call %[bpf_dynptr_read]; \ r8 = 1; \ if r0 != 0 goto sjmp2; \ r8 = 0; \ sjmp2: \ r1 = r10; \ r1 += -24; \ r2 = 0; \ call %[bpf_ringbuf_discard_dynptr]; " : : __imm(bpf_map_update_elem), __imm(bpf_map_lookup_elem), __imm(bpf_ringbuf_reserve_dynptr), __imm(bpf_ringbuf_discard_dynptr), __imm(bpf_dynptr_from_mem), __imm(bpf_dynptr_read), __imm_addr(ringbuf), __imm_addr(array_map4) : __clobber_all ); return 0; } + +/* Test that it is allowed to overwrite unreferenced dynptr. */ +SEC("?raw_tp") +__success +int dynptr_overwrite_unref(void *ctx) +{ + struct bpf_dynptr ptr; + + if (get_map_val_dynptr(&ptr)) + return 0; + if (get_map_val_dynptr(&ptr)) + return 0; + if (get_map_val_dynptr(&ptr)) + return 0; + + return 0; +} + +/* Test that slices are invalidated on reinitializing a dynptr. */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int dynptr_invalidate_slice_reinit(void *ctx) +{ + struct bpf_dynptr ptr; + __u8 *p; + + if (get_map_val_dynptr(&ptr)) + return 0; + p = bpf_dynptr_data(&ptr, 0, 1); + if (!p) + return 0; + if (get_map_val_dynptr(&ptr)) + return 0; + /* this should fail */ + return *p; +} + +/* Invalidation of dynptr slices on destruction of dynptr should not miss + * mem_or_null pointers. + */ +SEC("?raw_tp") +__failure __msg("R1 type=scalar expected=percpu_ptr_") +int dynptr_invalidate_slice_or_null(void *ctx) +{ + struct bpf_dynptr ptr; + __u8 *p; + + if (get_map_val_dynptr(&ptr)) + return 0; + + p = bpf_dynptr_data(&ptr, 0, 1); + *(__u8 *)&ptr = 0; + /* this should fail */ + bpf_this_cpu_ptr(p); + return 0; +} + +/* Destruction of a dynptr should also invalidate any slices obtained from it */ +SEC("?raw_tp") +__failure __msg("R7 invalid mem access 'scalar'") +int dynptr_invalidate_slice_failure(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u8 *p1, *p2; + + if (get_map_val_dynptr(&ptr1)) + return 0; + if (get_map_val_dynptr(&ptr2)) + return 0; + + p1 = bpf_dynptr_data(&ptr1, 0, 1); + if (!p1) + return 0; + p2 = bpf_dynptr_data(&ptr2, 0, 1); + if (!p2) + return 0; + + *(__u8 *)&ptr1 = 0; + /* this should fail */ + return *p1; +} + +/* Invalidation of slices should be scoped and should not prevent dereferencing + * slices of another dynptr after destroying an unrelated dynptr + */ +SEC("?raw_tp") +__success +int dynptr_invalidate_slice_success(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u8 *p1, *p2; + + if (get_map_val_dynptr(&ptr1)) + return 1; + if (get_map_val_dynptr(&ptr2)) + return 1; + + p1 = bpf_dynptr_data(&ptr1, 0, 1); + if (!p1) + return 1; + p2 = bpf_dynptr_data(&ptr2, 0, 1); + if (!p2) + return 1; + + *(__u8 *)&ptr1 = 0; + return *p2; +} + +/* Overwriting referenced dynptr should be rejected */ +SEC("?raw_tp") +__failure __msg("cannot overwrite referenced dynptr") +int dynptr_overwrite_ref(void *ctx) +{ + struct bpf_dynptr ptr; + + bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr); + /* this should fail */ + if (get_map_val_dynptr(&ptr)) + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +/* Reject writes to dynptr slot from 
bpf_dynptr_read */ +SEC("?raw_tp") +__failure __msg("potential write to dynptr at off=-16") +int dynptr_read_into_slot(void *ctx) +{ + union { + struct { + char _pad[48]; + struct bpf_dynptr ptr; + }; + char buf[64]; + } data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &data.ptr); + /* this should fail */ + bpf_dynptr_read(data.buf, sizeof(data.buf), &data.ptr, 0, 0); + + return 0; +} + +/* Reject writes to dynptr slot for uninit arg */ +SEC("?raw_tp") +__failure __msg("potential write to dynptr at off=-16") +int uninit_write_into_slot(void *ctx) +{ + struct { + char buf[64]; + struct bpf_dynptr ptr; + } data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, 80, 0, &data.ptr); + /* this should fail */ + bpf_get_current_comm(data.buf, 80); + + return 0; +} + +static int callback(__u32 index, void *data) +{ + *(__u32 *)data = 123; + + return 0; +} + +/* If the dynptr is written into in a callback function, its data + * slices should be invalidated as well. + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int invalid_data_slices(void *ctx) +{ + struct bpf_dynptr ptr; + __u32 *slice; + + if (get_map_val_dynptr(&ptr)) + return 0; + + slice = bpf_dynptr_data(&ptr, 0, sizeof(__u32)); + if (!slice) + return 0; + + bpf_loop(10, callback, &ptr, 0); + + /* this should fail */ + *slice = 1; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h new file mode 100644 index 000000000000..83d33931136e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#ifndef _NESTED_TRUST_COMMON_H +#define _NESTED_TRUST_COMMON_H + +#include <stdbool.h> + +bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym; +bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; + +#endif /* _NESTED_TRUST_COMMON_H */ diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c new file mode 100644 index 000000000000..14aff7676436 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#include "nested_trust_common.h" + +char _license[] SEC("license") = "GPL"; + +/* Prototype for all of the program trace events below: + * + * TRACE_EVENT(task_newtask, + * TP_PROTO(struct task_struct *p, u64 clone_flags) + */ + +SEC("tp_btf/task_newtask") +__failure __msg("R2 must be referenced or trusted") +int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags) +{ + bpf_cpumask_test_cpu(0, task->user_cpus_ptr); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc") +int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags) +{ + bpf_cpumask_first_zero(&task->cpus_mask); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c new file mode 100644 index 000000000000..886ade4aa99d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#include "nested_trust_common.h" + +char _license[] SEC("license") = "GPL"; + +SEC("tp_btf/task_newtask") +__success +int BPF_PROG(test_read_cpumask, struct task_struct *task, u64 clone_flags) +{ + bpf_cpumask_test_cpu(0, task->cpus_ptr); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 9523333b8905..7a438600ae98 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -22,6 +22,7 @@ int nr_active; int nr_connect; int nr_binddev; int nr_socket_post_create; +int nr_fin_wait1; struct sockopt_test { int opt; @@ -386,6 +387,13 @@ int skops_sockopt(struct bpf_sock_ops *skops) nr_passive += !(bpf_test_sockopt(skops, sk) || test_tcp_maxseg(skops, sk) || test_tcp_saved_syn(skops, sk)); + bpf_sock_ops_cb_flags_set(skops, + skops->bpf_sock_ops_cb_flags | + BPF_SOCK_OPS_STATE_CB_FLAG); + break; + case BPF_SOCK_OPS_STATE_CB: + if (skops->args[1] == BPF_TCP_CLOSE_WAIT) + nr_fin_wait1 += !bpf_test_sockopt(skops, sk); break; } diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index 1b47b94dbca0..f19d54eda4f1 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -5,6 +5,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" #include "task_kfunc_common.h" char _license[] SEC("license") = "GPL"; @@ -27,6 +28,7 @@ static struct __tasks_kfunc_map_value *insert_lookup_task(struct task_struct *ta } SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -44,6 +46,7 @@ int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_f } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 pointer type STRUCT task_struct must point") int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired, *stack_task = (struct task_struct *)&clone_flags; @@ -56,6 
+59,7 @@ int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) } SEC("kretprobe/free_task") +__failure __msg("reg type unsupported for arg#0 function") int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -68,6 +72,7 @@ int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 } SEC("tp_btf/task_newtask") +__failure __msg("R1 must be referenced or trusted") int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -81,6 +86,7 @@ int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 cl SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -95,6 +101,7 @@ int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -107,6 +114,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; @@ -122,6 +130,7 @@ int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr, *acquired; @@ -140,6 +149,7 @@ int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clo } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 expected pointer to map value") int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; @@ -155,6 +165,7 @@ int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; @@ -174,6 +185,7 @@ int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_fla } SEC("tp_btf/task_newtask") +__failure __msg("Unreleased reference") int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; @@ -193,6 +205,7 @@ int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flag } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket") int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value *v; @@ -208,6 +221,7 @@ int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_f } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 pointer type STRUCT task_struct must point") int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired = (struct task_struct *)&clone_flags; @@ -219,6 +233,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") int 
BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value local, *v; @@ -251,6 +266,7 @@ int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") +__failure __msg("release kernel function bpf_task_release expects") int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_flags) { /* Cannot release trusted task pointer which was not acquired. */ @@ -260,6 +276,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") +__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; @@ -273,6 +290,7 @@ int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 cl } SEC("lsm/task_free") +__failure __msg("reg type unsupported for arg#0 function") int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) { struct task_struct *acquired; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 227e85e85dda..9fc603c9d673 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -34,6 +34,11 @@ __be16 dport = 0; int test_exist_lookup = -ENOENT; u32 test_exist_lookup_mark = 0; +enum nf_nat_manip_type___local { + NF_NAT_MANIP_SRC___local, + NF_NAT_MANIP_DST___local +}; + struct nf_conn; struct bpf_ct_opts___local { @@ -58,7 +63,7 @@ int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym; int bpf_ct_set_status(struct nf_conn *, u32) __ksym; int bpf_ct_change_status(struct nf_conn *, u32) __ksym; int bpf_ct_set_nat_info(struct nf_conn *, union nf_inet_addr *, - int port, enum nf_nat_manip_type) __ksym; + int port, enum nf_nat_manip_type___local) __ksym; static __always_inline void nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, @@ -157,10 +162,10 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, /* snat */ saddr.ip = bpf_get_prandom_u32(); - bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC); + bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local); /* dnat */ daddr.ip = bpf_get_prandom_u32(); - bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST); + bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local); ct_ins = bpf_ct_insert_entry(ct); if (ct_ins) { diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index a0e7762b1e5a..e6e678aa9874 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -38,6 +38,10 @@ static const int cfg_udp_src = 20000; #define VXLAN_FLAGS 0x8 #define VXLAN_VNI 1 +#ifndef NEXTHDR_DEST +#define NEXTHDR_DEST 60 +#endif + /* MPLS label 1000 with S bit (last label) set and ttl of 255. 
*/ static const __u32 mpls_label = __bpf_constant_htonl(1000 << 12 | MPLS_LS_S_MASK | 0xff); @@ -363,6 +367,61 @@ static __always_inline int __encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, return TC_ACT_OK; } +static int encap_ipv6_ipip6(struct __sk_buff *skb) +{ + struct iphdr iph_inner; + struct v6hdr h_outer; + struct tcphdr tcph; + struct ethhdr eth; + __u64 flags; + int olen; + + if (bpf_skb_load_bytes(skb, ETH_HLEN, &iph_inner, + sizeof(iph_inner)) < 0) + return TC_ACT_OK; + + /* filter only packets we want */ + if (bpf_skb_load_bytes(skb, ETH_HLEN + (iph_inner.ihl << 2), + &tcph, sizeof(tcph)) < 0) + return TC_ACT_OK; + + if (tcph.dest != __bpf_constant_htons(cfg_port)) + return TC_ACT_OK; + + olen = sizeof(h_outer.ip); + + flags = BPF_F_ADJ_ROOM_FIXED_GSO | BPF_F_ADJ_ROOM_ENCAP_L3_IPV6; + + /* add room between mac and network header */ + if (bpf_skb_adjust_room(skb, olen, BPF_ADJ_ROOM_MAC, flags)) + return TC_ACT_SHOT; + + /* prepare new outer network header */ + memset(&h_outer.ip, 0, sizeof(h_outer.ip)); + h_outer.ip.version = 6; + h_outer.ip.hop_limit = iph_inner.ttl; + h_outer.ip.saddr.s6_addr[1] = 0xfd; + h_outer.ip.saddr.s6_addr[15] = 1; + h_outer.ip.daddr.s6_addr[1] = 0xfd; + h_outer.ip.daddr.s6_addr[15] = 2; + h_outer.ip.payload_len = iph_inner.tot_len; + h_outer.ip.nexthdr = IPPROTO_IPIP; + + /* store new outer network header */ + if (bpf_skb_store_bytes(skb, ETH_HLEN, &h_outer, olen, + BPF_F_INVALIDATE_HASH) < 0) + return TC_ACT_SHOT; + + /* update eth->h_proto */ + if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0) + return TC_ACT_SHOT; + eth.h_proto = bpf_htons(ETH_P_IPV6); + if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0) < 0) + return TC_ACT_SHOT; + + return TC_ACT_OK; +} + static __always_inline int encap_ipv6(struct __sk_buff *skb, __u8 encap_proto, __u16 l2_proto) { @@ -461,6 +520,15 @@ int __encap_ip6tnl_none(struct __sk_buff *skb) return TC_ACT_OK; } +SEC("encap_ipip6_none") +int __encap_ipip6_none(struct __sk_buff *skb) +{ + if (skb->protocol == __bpf_constant_htons(ETH_P_IP)) + return encap_ipv6_ipip6(skb); + else + return TC_ACT_OK; +} + SEC("encap_ip6gre_none") int __encap_ip6gre_none(struct __sk_buff *skb) { @@ -528,13 +596,33 @@ int __encap_ip6vxlan_eth(struct __sk_buff *skb) static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) { + __u64 flags = BPF_F_ADJ_ROOM_FIXED_GSO; + struct ipv6_opt_hdr ip6_opt_hdr; struct gre_hdr greh; struct udphdr udph; int olen = len; switch (proto) { case IPPROTO_IPIP: + flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4; + break; case IPPROTO_IPV6: + flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6; + break; + case NEXTHDR_DEST: + if (bpf_skb_load_bytes(skb, off + len, &ip6_opt_hdr, + sizeof(ip6_opt_hdr)) < 0) + return TC_ACT_OK; + switch (ip6_opt_hdr.nexthdr) { + case IPPROTO_IPIP: + flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV4; + break; + case IPPROTO_IPV6: + flags |= BPF_F_ADJ_ROOM_DECAP_L3_IPV6; + break; + default: + return TC_ACT_OK; + } break; case IPPROTO_GRE: olen += sizeof(struct gre_hdr); @@ -569,8 +657,7 @@ static int decap_internal(struct __sk_buff *skb, int off, int len, char proto) return TC_ACT_OK; } - if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, - BPF_F_ADJ_ROOM_FIXED_GSO)) + if (bpf_skb_adjust_room(skb, -olen, BPF_ADJ_ROOM_MAC, flags)) return TC_ACT_SHOT; return TC_ACT_OK; } diff --git a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c index ab75522e2eeb..774ddeb45898 100644 --- 
a/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c +++ b/tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c @@ -6,10 +6,12 @@ #include <bpf/bpf_core_read.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +#include "bpf_misc.h" int uprobe_byname_parm1 = 0; int uprobe_byname_ran = 0; int uretprobe_byname_rc = 0; +int uretprobe_byname_ret = 0; int uretprobe_byname_ran = 0; size_t uprobe_byname2_parm1 = 0; int uprobe_byname2_ran = 0; @@ -18,6 +20,8 @@ int uretprobe_byname2_ran = 0; int test_pid; +int a[8]; + /* This program cannot auto-attach, but that should not stop other * programs from attaching. */ @@ -28,18 +32,58 @@ int handle_uprobe_noautoattach(struct pt_regs *ctx) } SEC("uprobe//proc/self/exe:autoattach_trigger_func") -int handle_uprobe_byname(struct pt_regs *ctx) +int BPF_UPROBE(handle_uprobe_byname + , int arg1 + , int arg2 + , int arg3 +#if FUNC_REG_ARG_CNT > 3 + , int arg4 +#endif +#if FUNC_REG_ARG_CNT > 4 + , int arg5 +#endif +#if FUNC_REG_ARG_CNT > 5 + , int arg6 +#endif +#if FUNC_REG_ARG_CNT > 6 + , int arg7 +#endif +#if FUNC_REG_ARG_CNT > 7 + , int arg8 +#endif +) { uprobe_byname_parm1 = PT_REGS_PARM1_CORE(ctx); uprobe_byname_ran = 1; + + a[0] = arg1; + a[1] = arg2; + a[2] = arg3; +#if FUNC_REG_ARG_CNT > 3 + a[3] = arg4; +#endif +#if FUNC_REG_ARG_CNT > 4 + a[4] = arg5; +#endif +#if FUNC_REG_ARG_CNT > 5 + a[5] = arg6; +#endif +#if FUNC_REG_ARG_CNT > 6 + a[6] = arg7; +#endif +#if FUNC_REG_ARG_CNT > 7 + a[7] = arg8; +#endif return 0; } SEC("uretprobe//proc/self/exe:autoattach_trigger_func") -int handle_uretprobe_byname(struct pt_regs *ctx) +int BPF_URETPROBE(handle_uretprobe_byname, int ret) { uretprobe_byname_rc = PT_REGS_RC_CORE(ctx); + uretprobe_byname_ret = ret; uretprobe_byname_ran = 2; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index 134768f6b788..cdf3c48d6cbb 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -195,7 +195,7 @@ int xdp_prognum2(struct xdp_md *ctx) /* Moving Ethernet header, dest overlap with src, memmove handle this */ dest = data; - dest+= VLAN_HDR_SZ; + dest += VLAN_HDR_SZ; /* * Notice: Taking over vlan_hdr->h_vlan_encapsulated_proto, by * only moving two MAC addrs (12 bytes), not overwriting last 2 bytes diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c new file mode 100644 index 000000000000..25b8178735ee --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include "xdp_metadata.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 256); + __type(key, __u32); + __type(value, __u32); +} xsk SEC(".maps"); + +extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, + __u64 *timestamp) __ksym; +extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, + __u32 *hash) __ksym; + +SEC("xdp") +int rx(struct xdp_md *ctx) +{ + void *data, *data_meta, *data_end; + struct ipv6hdr *ip6h = NULL; + struct ethhdr *eth = NULL; + struct udphdr *udp = NULL; + struct iphdr *iph = NULL; + struct xdp_meta *meta; + int ret; + + data = (void *)(long)ctx->data; + data_end = (void *)(long)ctx->data_end; + eth = data; + if (eth + 1 < data_end) { + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + iph = (void *)(eth + 1); + if (iph + 1 < data_end 
&& iph->protocol == IPPROTO_UDP) + udp = (void *)(iph + 1); + } + if (eth->h_proto == bpf_htons(ETH_P_IPV6)) { + ip6h = (void *)(eth + 1); + if (ip6h + 1 < data_end && ip6h->nexthdr == IPPROTO_UDP) + udp = (void *)(ip6h + 1); + } + if (udp && udp + 1 > data_end) + udp = NULL; + } + + if (!udp) + return XDP_PASS; + + if (udp->dest != bpf_htons(9091)) + return XDP_PASS; + + bpf_printk("forwarding UDP:9091 to AF_XDP"); + + ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta)); + if (ret != 0) { + bpf_printk("bpf_xdp_adjust_meta returned %d", ret); + return XDP_PASS; + } + + data = (void *)(long)ctx->data; + data_meta = (void *)(long)ctx->data_meta; + meta = data_meta; + + if (meta + 1 > data) { + bpf_printk("bpf_xdp_adjust_meta doesn't appear to work"); + return XDP_PASS; + } + + if (!bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp)) + bpf_printk("populated rx_timestamp with %u", meta->rx_timestamp); + + if (!bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash)) + bpf_printk("populated rx_hash with %u", meta->rx_hash); + + return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c new file mode 100644 index 000000000000..77678b034389 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include "xdp_metadata.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 4); + __type(key, __u32); + __type(value, __u32); +} xsk SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} prog_arr SEC(".maps"); + +extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, + __u64 *timestamp) __ksym; +extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, + __u32 *hash) __ksym; + +SEC("xdp") +int rx(struct xdp_md *ctx) +{ + void *data, *data_meta; + struct xdp_meta *meta; + u64 timestamp = -1; + int ret; + + /* Reserve enough for all custom metadata. */ + + ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(struct xdp_meta)); + if (ret != 0) + return XDP_DROP; + + data = (void *)(long)ctx->data; + data_meta = (void *)(long)ctx->data_meta; + + if (data_meta + sizeof(struct xdp_meta) > data) + return XDP_DROP; + + meta = data_meta; + + /* Export metadata. */ + + /* We expect veth bpf_xdp_metadata_rx_timestamp to return 0 HW + * timestamp, so put some non-zero value into AF_XDP frame for + * the userspace. 
+ */ + bpf_xdp_metadata_rx_timestamp(ctx, &timestamp); + if (timestamp == 0) + meta->rx_timestamp = 1; + + bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash); + + return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata2.c b/tools/testing/selftests/bpf/progs/xdp_metadata2.c new file mode 100644 index 000000000000..cf69d05451c3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdp_metadata2.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include "xdp_metadata.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, + __u32 *hash) __ksym; + +int called; + +SEC("freplace/rx") +int freplace_rx(struct xdp_md *ctx) +{ + u32 hash = 0; + /* Call _any_ metadata function to make sure we don't crash. */ + bpf_xdp_metadata_rx_hash(ctx, &hash); + called++; + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c new file mode 100644 index 000000000000..744a01d0e57d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Intel */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __uint(key_size, sizeof(int)); + __uint(value_size, sizeof(int)); +} xsk SEC(".maps"); + +static unsigned int idx; + +SEC("xdp") int xsk_def_prog(struct xdp_md *xdp) +{ + return bpf_redirect_map(&xsk, 0, XDP_DROP); +} + +SEC("xdp") int xsk_xdp_drop(struct xdp_md *xdp) +{ + /* Drop every other packet */ + if (idx++ % 2) + return XDP_DROP; + + return bpf_redirect_map(&xsk, 0, XDP_DROP); +} + +char _license[] SEC("license") = "GPL";
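For context on the bpf_misc.h additions at the top of this patch: __naked, __clobber_all/__clobber_common, __imm() and __imm_addr() are what let verifier tests be written as hand-assembled BPF, with helper and map addresses passed into the asm block as immediates, as the dynptr_pruning_* programs above demonstrate. Below is a minimal sketch of the same pattern; it is not part of the patch, and the program name and expected verifier message are illustrative only.

/* Sketch only: a naked function whose body is a single BPF asm block.
 * Loading through r0 while it holds the scalar 0 should be rejected by
 * the verifier, so the program is annotated with the expected message.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("?raw_tp")
__failure __msg("R0 invalid mem access")
__naked int example_scalar_deref(void)
{
	asm volatile (
	"	r0 = 0;				\
		r0 = *(u64 *)(r0 + 0);		\
		exit;				"
	:
	:
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";

Because the function is naked, the asm block must end with exit rather than a C return, and __clobber_all tells the compiler every BPF register and memory may be touched, so no surrounding register state is assumed.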