author     Jakub Kicinski <kuba@kernel.org>   2023-08-03 15:34:36 -0700
committer  Jakub Kicinski <kuba@kernel.org>   2023-08-03 15:34:36 -0700
commit     d07b7b32da6f678d42d96a8b9824cf0a181ce140 (patch)
tree       606829d4b33a57dbe0f0e825ca8505e0b5fcb759 /tools
parent     35b1b1fd96388d5e3cf179bf36bd8a4153baf4a3 (diff)
parent     648880e9331c68b2008430fd90f3648d1795399d (diff)
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Martin KaFai Lau says:
====================
pull-request: bpf-next 2023-08-03
We've added 54 non-merge commits during the last 10 day(s) which contain
a total of 84 files changed, 4026 insertions(+), 562 deletions(-).
The main changes are:
1) Add SO_REUSEPORT support for TC bpf_sk_assign from Lorenz Bauer,
Daniel Borkmann
2) Support new insns from cpu v4 from Yonghong Song
3) Non-atomically allocate freelist during prefill from YiFei Zhu
4) Support defragmenting IPv(4|6) packets in BPF from Daniel Xu
5) Add tracepoint to xdp attaching failure from Leon Hwang
6) struct netdev_rx_queue and xdp.h reshuffling to reduce
rebuild time from Jakub Kicinski
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (54 commits)
net: invert the netdevice.h vs xdp.h dependency
net: move struct netdev_rx_queue out of netdevice.h
eth: add missing xdp.h includes in drivers
selftests/bpf: Add testcase for xdp attaching failure tracepoint
bpf, xdp: Add tracepoint to xdp attaching failure
selftests/bpf: fix static assert compilation issue for test_cls_*.c
bpf: fix bpf_probe_read_kernel prototype mismatch
riscv, bpf: Adapt bpf trampoline to optimized riscv ftrace framework
libbpf: fix typos in Makefile
tracing: bpf: use struct trace_entry in struct syscall_tp_t
bpf, devmap: Remove unused dtab field from bpf_dtab_netdev
bpf, cpumap: Remove unused cmap field from bpf_cpu_map_entry
netfilter: bpf: Only define get_proto_defrag_hook() if necessary
bpf: Fix an array-index-out-of-bounds issue in disasm.c
net: remove duplicate INDIRECT_CALLABLE_DECLARE of udp[6]_ehashfn
docs/bpf: Fix malformed documentation
bpf: selftests: Add defrag selftests
bpf: selftests: Support custom type and proto for client sockets
bpf: selftests: Support not connecting client socket
netfilter: bpf: Support BPF_F_NETFILTER_IP_DEFRAG in netfilter link
...
====================
Link: https://lore.kernel.org/r/20230803174845.825419-1-martin.lau@linux.dev
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
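
Item 1 in the list above (SO_REUSEPORT support for TC bpf_sk_assign) is exercised by the new test_assign_reuse selftest included in the diff below. As a reading aid, here is a minimal sketch of the BPF side, condensed from that selftest: a TC ingress program looks up a listening socket in a sockmap and assigns the skb to it, which with this series also works when the socket belongs to a reuseport group. The program name is illustrative; the full selftest additionally snapshots the packet headers and records socket cookies.

/* Condensed from tools/testing/selftests/bpf/progs/test_assign_reuse.c
 * (full version in the diff below).
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} sk_map SEC(".maps");

SEC("tc")
int assign_to_reuseport_sk(struct __sk_buff *skb)
{
	__u32 zero = 0;
	struct bpf_sock *sk;
	long ret;

	/* Userspace stores the server socket fd in slot 0 of sk_map. */
	sk = bpf_map_lookup_elem(&sk_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;

	/* Before this series the TC flavour of bpf_sk_assign() returned
	 * -ESOCKTNOSUPPORT for reuseport sockets (see the helper doc change
	 * in the bpf.h hunk below); now the attached SK_REUSEPORT program
	 * picks the final target socket.
	 */
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);

	return ret ? TC_ACT_SHOT : TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";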
Diffstat (limited to 'tools')
26 files changed, 2571 insertions(+), 24 deletions(-)
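
The diff itself opens with the UAPI side of item 4: a new BPF_F_NETFILTER_IP_DEFRAG link-creation flag for BPF_PROG_TYPE_NETFILTER programs. For orientation, the userspace attach path used by the new ip_check_defrag selftest further down looks roughly like the sketch below. This is an illustration, not the full test: the function name is made up here, hooknum is spelled out where the selftest relies on the default of 0, and a libbpf and kernel recent enough to know the flag are assumed.

/* Condensed from the attach() helper in
 * tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c (see below).
 */
#include <errno.h>
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/netfilter.h>
#include <bpf/libbpf.h>
#include "ip_check_defrag.skel.h"	/* skeleton generated from the selftest BPF prog */

static int attach_defrag(struct ip_check_defrag *skel, bool ipv6)
{
	/* BPF_F_NETFILTER_IP_DEFRAG asks the kernel to reassemble IP
	 * fragments before the netfilter BPF program runs, so the program
	 * only ever sees complete packets.
	 */
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = ipv6 ? NFPROTO_IPV6 : NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = 42,
		.flags = BPF_F_NETFILTER_IP_DEFRAG,
	);

	skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts);
	if (!skel->links.defrag)
		return -errno;

	return 0;
}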
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 739c15906a65..70da85200695 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -19,6 +19,7 @@ /* ld/ldx fields */ #define BPF_DW 0x18 /* double word (64-bit) */ +#define BPF_MEMSX 0x80 /* load with sign extension */ #define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ #define BPF_XADD 0xc0 /* exclusive add - legacy name */ @@ -1187,6 +1188,11 @@ enum bpf_perf_event_type { */ #define BPF_F_KPROBE_MULTI_RETURN (1U << 0) +/* link_create.netfilter.flags used in LINK_CREATE command for + * BPF_PROG_TYPE_NETFILTER to enable IP packet defragmentation. + */ +#define BPF_F_NETFILTER_IP_DEFRAG (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -4198,9 +4204,6 @@ union bpf_attr { * **-EOPNOTSUPP** if the operation is not supported, for example * a call from outside of TC ingress. * - * **-ESOCKTNOSUPPORT** if the socket type is not supported - * (reuseport). - * * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) * Description * Helper is overloaded depending on BPF program type. This diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index cf7f02c67968..4be7144e4803 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -293,11 +293,11 @@ help: @echo ' HINT: use "V=1" to enable verbose build' @echo ' all - build libraries and pkgconfig' @echo ' clean - remove all generated files' - @echo ' check - check abi and version info' + @echo ' check - check ABI and version info' @echo '' @echo 'libbpf install targets:' @echo ' HINT: use "prefix"(defaults to "/usr/local") or "DESTDIR" (defaults to "/")' - @echo ' to adjust target desitantion, e.g. "make prefix=/usr/local install"' + @echo ' to adjust target destination, e.g. 
"make prefix=/usr/local install"' @echo ' install - build and install all headers, libraries and pkgconfig' @echo ' install_headers - install only headers to include/bpf' @echo '' diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 116fecf80ca1..110518ba4804 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -13,6 +13,7 @@ test_dev_cgroup /test_progs /test_progs-no_alu32 /test_progs-bpf_gcc +/test_progs-cpuv4 test_verifier_log feature test_sock @@ -36,6 +37,7 @@ test_cpp *.lskel.h /no_alu32 /bpf_gcc +/cpuv4 /host-tools /tools /runqslower diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 882be03b179f..e4e1e6492268 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -33,11 +33,16 @@ CFLAGS += -g -O0 -rdynamic -Wall -Werror $(GENFLAGS) $(SAN_CFLAGS) \ LDFLAGS += $(SAN_LDFLAGS) LDLIBS += -lelf -lz -lrt -lpthread -# Silence some warnings when compiled with clang ifneq ($(LLVM),) +# Silence some warnings when compiled with clang CFLAGS += -Wno-unused-command-line-argument endif +# Check whether bpf cpu=v4 is supported or not by clang +ifneq ($(shell $(CLANG) --target=bpf -mcpu=help 2>&1 | grep 'v4'),) +CLANG_CPUV4 := 1 +endif + # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ test_dev_cgroup \ @@ -51,6 +56,10 @@ ifneq ($(BPF_GCC),) TEST_GEN_PROGS += test_progs-bpf_gcc endif +ifneq ($(CLANG_CPUV4),) +TEST_GEN_PROGS += test_progs-cpuv4 +endif + TEST_GEN_FILES = test_lwt_ip_encap.bpf.o test_tc_edt.bpf.o TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c) @@ -383,6 +392,11 @@ define CLANG_NOALU32_BPF_BUILD_RULE $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2 endef +# Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4 +define CLANG_CPUV4_BPF_BUILD_RULE + $(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2) + $(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2 +endef # Build BPF object using GCC define GCC_BPF_BUILD_RULE $(call msg,GCC-BPF,$(TRUNNER_BINARY),$2) @@ -425,7 +439,7 @@ LINKED_BPF_SRCS := $(patsubst %.bpf.o,%.c,$(foreach skel,$(LINKED_SKELS),$($(ske # $eval()) and pass control to DEFINE_TEST_RUNNER_RULES. 
# Parameters: # $1 - test runner base binary name (e.g., test_progs) -# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc) +# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc) define DEFINE_TEST_RUNNER TRUNNER_OUTPUT := $(OUTPUT)$(if $2,/)$2 @@ -453,7 +467,7 @@ endef # Using TRUNNER_XXX variables, provided by callers of DEFINE_TEST_RUNNER and # set up by DEFINE_TEST_RUNNER itself, create test runner build rules with: # $1 - test runner base binary name (e.g., test_progs) -# $2 - test runner extra "flavor" (e.g., no_alu32, gcc-bpf, etc) +# $2 - test runner extra "flavor" (e.g., no_alu32, cpuv4, gcc-bpf, etc) define DEFINE_TEST_RUNNER_RULES ifeq ($($(TRUNNER_OUTPUT)-dir),) @@ -565,8 +579,8 @@ TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ btf_helpers.c flow_dissector_load.h \ cap_helpers.c test_loader.c xsk.c disasm.c \ - json_writer.c unpriv_helpers.c - + json_writer.c unpriv_helpers.c \ + ip_check_defrag_frags.h TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ @@ -584,6 +598,13 @@ TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) $(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32)) +# Define test_progs-cpuv4 test runner. +ifneq ($(CLANG_CPUV4),) +TRUNNER_BPF_BUILD_RULE := CLANG_CPUV4_BPF_BUILD_RULE +TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) +$(eval $(call DEFINE_TEST_RUNNER,test_progs,cpuv4)) +endif + # Define test_progs BPF-GCC-flavored test runner. ifneq ($(BPF_GCC),) TRUNNER_BPF_BUILD_RULE := GCC_BPF_BUILD_RULE @@ -681,7 +702,7 @@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \ - no_alu32 bpf_gcc bpf_testmod.ko \ + no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ liburandom_read.so) .PHONY: docs docs-clean diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index a6f991b56345..cefc5dd72573 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -98,6 +98,12 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e, return bpf_testmod_test_struct_arg_result; } +noinline int +bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { + bpf_testmod_test_struct_arg_result = a->a; + return bpf_testmod_test_struct_arg_result; +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { @@ -240,7 +246,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, .off = off, .len = len, }; - struct bpf_testmod_struct_arg_1 struct_arg1 = {10}; + struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1}; struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; struct bpf_testmod_struct_arg_3 *struct_arg3; struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22}; @@ -259,6 +265,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, (void *)20, struct_arg4, 23); + (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + sizeof(int)), GFP_KERNEL); diff --git a/tools/testing/selftests/bpf/generate_udp_fragments.py b/tools/testing/selftests/bpf/generate_udp_fragments.py new file mode 100755 
index 000000000000..2b8a1187991c --- /dev/null +++ b/tools/testing/selftests/bpf/generate_udp_fragments.py @@ -0,0 +1,90 @@ +#!/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +""" +This script helps generate fragmented UDP packets. + +While it is technically possible to dynamically generate +fragmented packets in C, it is much harder to read and write +said code. `scapy` is relatively industry standard and really +easy to read / write. + +So we choose to write this script that generates a valid C +header. Rerun script and commit generated file after any +modifications. +""" + +import argparse +import os + +from scapy.all import * + + +# These constants must stay in sync with `ip_check_defrag.c` +VETH1_ADDR = "172.16.1.200" +VETH0_ADDR6 = "fc00::100" +VETH1_ADDR6 = "fc00::200" +CLIENT_PORT = 48878 +SERVER_PORT = 48879 +MAGIC_MESSAGE = "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME" + + +def print_header(f): + f.write("// SPDX-License-Identifier: GPL-2.0\n") + f.write("/* DO NOT EDIT -- this file is generated */\n") + f.write("\n") + f.write("#ifndef _IP_CHECK_DEFRAG_FRAGS_H\n") + f.write("#define _IP_CHECK_DEFRAG_FRAGS_H\n") + f.write("\n") + f.write("#include <stdint.h>\n") + f.write("\n") + + +def print_frags(f, frags, v6): + for idx, frag in enumerate(frags): + # 10 bytes per line to keep width in check + chunks = [frag[i : i + 10] for i in range(0, len(frag), 10)] + chunks_fmted = [", ".join([str(hex(b)) for b in chunk]) for chunk in chunks] + suffix = "6" if v6 else "" + + f.write(f"static uint8_t frag{suffix}_{idx}[] = {{\n") + for chunk in chunks_fmted: + f.write(f"\t{chunk},\n") + f.write(f"}};\n") + + +def print_trailer(f): + f.write("\n") + f.write("#endif /* _IP_CHECK_DEFRAG_FRAGS_H */\n") + + +def main(f): + # srcip of 0 is filled in by IP_HDRINCL + sip = "0.0.0.0" + sip6 = VETH0_ADDR6 + dip = VETH1_ADDR + dip6 = VETH1_ADDR6 + sport = CLIENT_PORT + dport = SERVER_PORT + payload = MAGIC_MESSAGE.encode() + + # Disable UDPv4 checksums to keep code simpler + pkt = IP(src=sip,dst=dip) / UDP(sport=sport,dport=dport,chksum=0) / Raw(load=payload) + # UDPv6 requires a checksum + # Also pin the ipv6 fragment header ID, otherwise it's a random value + pkt6 = IPv6(src=sip6,dst=dip6) / IPv6ExtHdrFragment(id=0xBEEF) / UDP(sport=sport,dport=dport) / Raw(load=payload) + + frags = [f.build() for f in pkt.fragment(24)] + frags6 = [f.build() for f in fragment6(pkt6, 72)] + + print_header(f) + print_frags(f, frags, False) + print_frags(f, frags6, True) + print_trailer(f) + + +if __name__ == "__main__": + dir = os.path.dirname(os.path.realpath(__file__)) + header = f"{dir}/ip_check_defrag_frags.h" + with open(header, "w") as f: + main(f) diff --git a/tools/testing/selftests/bpf/ip_check_defrag_frags.h b/tools/testing/selftests/bpf/ip_check_defrag_frags.h new file mode 100644 index 000000000000..70ab7e9fa22b --- /dev/null +++ b/tools/testing/selftests/bpf/ip_check_defrag_frags.h @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* DO NOT EDIT -- this file is generated */ + +#ifndef _IP_CHECK_DEFRAG_FRAGS_H +#define _IP_CHECK_DEFRAG_FRAGS_H + +#include <stdint.h> + +static uint8_t frag_0[] = { + 0x45, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x20, 0x0, 0x40, 0x11, + 0xac, 0xe8, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 0x1, 0xc8, + 0xbe, 0xee, 0xbe, 0xef, 0x0, 0x3a, 0x0, 0x0, 0x54, 0x48, + 0x49, 0x53, 0x20, 0x49, 0x53, 0x20, 0x54, 0x48, 0x45, 0x20, + 0x4f, 0x52, 0x49, 0x47, +}; +static uint8_t frag_1[] = { + 0x45, 0x0, 0x0, 0x2c, 0x0, 0x1, 0x20, 0x3, 0x40, 0x11, + 0xac, 0xe5, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 
0x1, 0xc8, + 0x49, 0x4e, 0x41, 0x4c, 0x20, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x2c, 0x20, 0x50, 0x4c, 0x45, 0x41, 0x53, 0x45, + 0x20, 0x52, 0x45, 0x41, +}; +static uint8_t frag_2[] = { + 0x45, 0x0, 0x0, 0x1e, 0x0, 0x1, 0x0, 0x6, 0x40, 0x11, + 0xcc, 0xf0, 0x0, 0x0, 0x0, 0x0, 0xac, 0x10, 0x1, 0xc8, + 0x53, 0x53, 0x45, 0x4d, 0x42, 0x4c, 0x45, 0x20, 0x4d, 0x45, +}; +static uint8_t frag6_0[] = { + 0x60, 0x0, 0x0, 0x0, 0x0, 0x20, 0x2c, 0x40, 0xfc, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, + 0x11, 0x0, 0x0, 0x1, 0x0, 0x0, 0xbe, 0xef, 0xbe, 0xee, + 0xbe, 0xef, 0x0, 0x3a, 0xd0, 0xf8, 0x54, 0x48, 0x49, 0x53, + 0x20, 0x49, 0x53, 0x20, 0x54, 0x48, 0x45, 0x20, 0x4f, 0x52, + 0x49, 0x47, +}; +static uint8_t frag6_1[] = { + 0x60, 0x0, 0x0, 0x0, 0x0, 0x20, 0x2c, 0x40, 0xfc, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, + 0x11, 0x0, 0x0, 0x19, 0x0, 0x0, 0xbe, 0xef, 0x49, 0x4e, + 0x41, 0x4c, 0x20, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x2c, 0x20, 0x50, 0x4c, 0x45, 0x41, 0x53, 0x45, 0x20, 0x52, + 0x45, 0x41, +}; +static uint8_t frag6_2[] = { + 0x60, 0x0, 0x0, 0x0, 0x0, 0x12, 0x2c, 0x40, 0xfc, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x1, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x0, + 0x11, 0x0, 0x0, 0x30, 0x0, 0x0, 0xbe, 0xef, 0x53, 0x53, + 0x45, 0x4d, 0x42, 0x4c, 0x45, 0x20, 0x4d, 0x45, +}; + +#endif /* _IP_CHECK_DEFRAG_FRAGS_H */ diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index a105c0cd008a..da72a3a66230 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -270,14 +270,23 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) opts = &default_opts; optlen = sizeof(type); - if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { - log_err("getsockopt(SOL_TYPE)"); - return -1; + + if (opts->type) { + type = opts->type; + } else { + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); + return -1; + } } - if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { - log_err("getsockopt(SOL_PROTOCOL)"); - return -1; + if (opts->proto) { + protocol = opts->proto; + } else { + if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { + log_err("getsockopt(SOL_PROTOCOL)"); + return -1; + } } addrlen = sizeof(addr); @@ -301,8 +310,9 @@ int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) strlen(opts->cc) + 1)) goto error_close; - if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) - goto error_close; + if (!opts->noconnect) + if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) + goto error_close; return fd; @@ -423,6 +433,9 @@ fail: void close_netns(struct nstoken *token) { + if (!token) + return; + ASSERT_OK(setns(token->orig_netns_fd, CLONE_NEWNET), "setns"); close(token->orig_netns_fd); free(token); diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 694185644da6..5eccc67d1a99 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -21,6 +21,9 @@ struct network_helper_opts { const char *cc; int 
timeout_ms; bool must_fail; + bool noconnect; + int type; + int proto; }; /* ipv4 test vector */ diff --git a/tools/testing/selftests/bpf/prog_tests/assign_reuse.c b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c new file mode 100644 index 000000000000..989ee4d9785b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/assign_reuse.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <uapi/linux/if_link.h> +#include <test_progs.h> + +#include <netinet/tcp.h> +#include <netinet/udp.h> + +#include "network_helpers.h" +#include "test_assign_reuse.skel.h" + +#define NS_TEST "assign_reuse" +#define LOOPBACK 1 +#define PORT 4443 + +static int attach_reuseport(int sock_fd, int prog_fd) +{ + return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); +} + +static __u64 cookie(int fd) +{ + __u64 cookie = 0; + socklen_t cookie_len = sizeof(cookie); + int ret; + + ret = getsockopt(fd, SOL_SOCKET, SO_COOKIE, &cookie, &cookie_len); + ASSERT_OK(ret, "cookie"); + ASSERT_GT(cookie, 0, "cookie_invalid"); + + return cookie; +} + +static int echo_test_udp(int fd_sv) +{ + struct sockaddr_storage addr = {}; + socklen_t len = sizeof(addr); + char buff[1] = {}; + int fd_cl = -1, ret; + + fd_cl = connect_to_fd(fd_sv, 100); + ASSERT_GT(fd_cl, 0, "create_client"); + ASSERT_EQ(getsockname(fd_cl, (void *)&addr, &len), 0, "getsockname"); + + ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); + + ret = recv(fd_sv, buff, sizeof(buff), 0); + if (ret < 0) { + close(fd_cl); + return errno; + } + + ASSERT_EQ(ret, 1, "recv_server"); + ASSERT_EQ(sendto(fd_sv, buff, sizeof(buff), 0, (void *)&addr, len), 1, "send_server"); + ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); + close(fd_cl); + return 0; +} + +static int echo_test_tcp(int fd_sv) +{ + char buff[1] = {}; + int fd_cl = -1, fd_sv_cl = -1; + + fd_cl = connect_to_fd(fd_sv, 100); + if (fd_cl < 0) + return errno; + + fd_sv_cl = accept(fd_sv, NULL, NULL); + ASSERT_GE(fd_sv_cl, 0, "accept_fd"); + + ASSERT_EQ(send(fd_cl, buff, sizeof(buff), 0), 1, "send_client"); + ASSERT_EQ(recv(fd_sv_cl, buff, sizeof(buff), 0), 1, "recv_server"); + ASSERT_EQ(send(fd_sv_cl, buff, sizeof(buff), 0), 1, "send_server"); + ASSERT_EQ(recv(fd_cl, buff, sizeof(buff), 0), 1, "recv_client"); + close(fd_sv_cl); + close(fd_cl); + return 0; +} + +void run_assign_reuse(int family, int sotype, const char *ip, __u16 port) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .ifindex = LOOPBACK, + .attach_point = BPF_TC_INGRESS, + ); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, tc_opts, + .handle = 1, + .priority = 1, + ); + bool hook_created = false, tc_attached = false; + int ret, fd_tc, fd_accept, fd_drop, fd_map; + int *fd_sv = NULL; + __u64 fd_val; + struct test_assign_reuse *skel; + const int zero = 0; + + skel = test_assign_reuse__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + skel->rodata->dest_port = port; + + ret = test_assign_reuse__load(skel); + if (!ASSERT_OK(ret, "skel_load")) + goto cleanup; + + ASSERT_EQ(skel->bss->sk_cookie_seen, 0, "cookie_init"); + + fd_tc = bpf_program__fd(skel->progs.tc_main); + fd_accept = bpf_program__fd(skel->progs.reuse_accept); + fd_drop = bpf_program__fd(skel->progs.reuse_drop); + fd_map = bpf_map__fd(skel->maps.sk_map); + + fd_sv = start_reuseport_server(family, sotype, ip, port, 100, 1); + if (!ASSERT_NEQ(fd_sv, NULL, "start_reuseport_server")) + goto cleanup; + + ret = attach_reuseport(*fd_sv, fd_drop); + if (!ASSERT_OK(ret, 
"attach_reuseport")) + goto cleanup; + + fd_val = *fd_sv; + ret = bpf_map_update_elem(fd_map, &zero, &fd_val, BPF_NOEXIST); + if (!ASSERT_OK(ret, "bpf_sk_map")) + goto cleanup; + + ret = bpf_tc_hook_create(&tc_hook); + if (ret == 0) + hook_created = true; + ret = ret == -EEXIST ? 0 : ret; + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto cleanup; + + tc_opts.prog_fd = fd_tc; + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto cleanup; + tc_attached = true; + + if (sotype == SOCK_STREAM) + ASSERT_EQ(echo_test_tcp(*fd_sv), ECONNREFUSED, "drop_tcp"); + else + ASSERT_EQ(echo_test_udp(*fd_sv), EAGAIN, "drop_udp"); + ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); + + skel->bss->sk_cookie_seen = 0; + skel->bss->reuseport_executed = 0; + ASSERT_OK(attach_reuseport(*fd_sv, fd_accept), "attach_reuseport(accept)"); + + if (sotype == SOCK_STREAM) + ASSERT_EQ(echo_test_tcp(*fd_sv), 0, "echo_tcp"); + else + ASSERT_EQ(echo_test_udp(*fd_sv), 0, "echo_udp"); + + ASSERT_EQ(skel->bss->sk_cookie_seen, cookie(*fd_sv), + "cookie_mismatch"); + ASSERT_EQ(skel->bss->reuseport_executed, 1, "program executed once"); +cleanup: + if (tc_attached) { + tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0; + ret = bpf_tc_detach(&tc_hook, &tc_opts); + ASSERT_OK(ret, "bpf_tc_detach"); + } + if (hook_created) { + tc_hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS; + bpf_tc_hook_destroy(&tc_hook); + } + test_assign_reuse__destroy(skel); + free_fds(fd_sv, 1); +} + +void test_assign_reuse(void) +{ + struct nstoken *tok = NULL; + + SYS(out, "ip netns add %s", NS_TEST); + SYS(cleanup, "ip -net %s link set dev lo up", NS_TEST); + + tok = open_netns(NS_TEST); + if (!ASSERT_OK_PTR(tok, "netns token")) + return; + + if (test__start_subtest("tcpv4")) + run_assign_reuse(AF_INET, SOCK_STREAM, "127.0.0.1", PORT); + if (test__start_subtest("tcpv6")) + run_assign_reuse(AF_INET6, SOCK_STREAM, "::1", PORT); + if (test__start_subtest("udpv4")) + run_assign_reuse(AF_INET, SOCK_DGRAM, "127.0.0.1", PORT); + if (test__start_subtest("udpv6")) + run_assign_reuse(AF_INET6, SOCK_DGRAM, "::1", PORT); + +cleanup: + close_netns(tok); + SYS_NOFAIL("ip netns delete %s", NS_TEST); +out: + return; +} diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c new file mode 100644 index 000000000000..57c814f5f6a7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <net/if.h> +#include <linux/netfilter.h> +#include <network_helpers.h> +#include "ip_check_defrag.skel.h" +#include "ip_check_defrag_frags.h" + +/* + * This selftest spins up a client and an echo server, each in their own + * network namespace. The client will send a fragmented message to the server. + * The prog attached to the server will shoot down any fragments. Thus, if + * the server is able to correctly echo back the message to the client, we will + * have verified that netfilter is reassembling packets for us. 
+ * + * Topology: + * ========= + * NS0 | NS1 + * | + * client | server + * ---------- | ---------- + * | veth0 | --------- | veth1 | + * ---------- peer ---------- + * | + * | with bpf + */ + +#define NS0 "defrag_ns0" +#define NS1 "defrag_ns1" +#define VETH0 "veth0" +#define VETH1 "veth1" +#define VETH0_ADDR "172.16.1.100" +#define VETH0_ADDR6 "fc00::100" +/* The following constants must stay in sync with `generate_udp_fragments.py` */ +#define VETH1_ADDR "172.16.1.200" +#define VETH1_ADDR6 "fc00::200" +#define CLIENT_PORT 48878 +#define SERVER_PORT 48879 +#define MAGIC_MESSAGE "THIS IS THE ORIGINAL MESSAGE, PLEASE REASSEMBLE ME" + +static int setup_topology(bool ipv6) +{ + bool up; + int i; + + SYS(fail, "ip netns add " NS0); + SYS(fail, "ip netns add " NS1); + SYS(fail, "ip link add " VETH0 " netns " NS0 " type veth peer name " VETH1 " netns " NS1); + if (ipv6) { + SYS(fail, "ip -6 -net " NS0 " addr add " VETH0_ADDR6 "/64 dev " VETH0 " nodad"); + SYS(fail, "ip -6 -net " NS1 " addr add " VETH1_ADDR6 "/64 dev " VETH1 " nodad"); + } else { + SYS(fail, "ip -net " NS0 " addr add " VETH0_ADDR "/24 dev " VETH0); + SYS(fail, "ip -net " NS1 " addr add " VETH1_ADDR "/24 dev " VETH1); + } + SYS(fail, "ip -net " NS0 " link set dev " VETH0 " up"); + SYS(fail, "ip -net " NS1 " link set dev " VETH1 " up"); + + /* Wait for up to 5s for links to come up */ + for (i = 0; i < 5; ++i) { + if (ipv6) + up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null"); + else + up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null"); + + if (up) + break; + } + + return 0; +fail: + return -1; +} + +static void cleanup_topology(void) +{ + SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " NS0); + SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " NS1); +} + +static int attach(struct ip_check_defrag *skel, bool ipv6) +{ + LIBBPF_OPTS(bpf_netfilter_opts, opts, + .pf = ipv6 ? 
NFPROTO_IPV6 : NFPROTO_IPV4, + .priority = 42, + .flags = BPF_F_NETFILTER_IP_DEFRAG); + struct nstoken *nstoken; + int err = -1; + + nstoken = open_netns(NS1); + + skel->links.defrag = bpf_program__attach_netfilter(skel->progs.defrag, &opts); + if (!ASSERT_OK_PTR(skel->links.defrag, "program attach")) + goto out; + + err = 0; +out: + close_netns(nstoken); + return err; +} + +static int send_frags(int client) +{ + struct sockaddr_storage saddr; + struct sockaddr *saddr_p; + socklen_t saddr_len; + int err; + + saddr_p = (struct sockaddr *)&saddr; + err = make_sockaddr(AF_INET, VETH1_ADDR, SERVER_PORT, &saddr, &saddr_len); + if (!ASSERT_OK(err, "make_sockaddr")) + return -1; + + err = sendto(client, frag_0, sizeof(frag_0), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_0")) + return -1; + + err = sendto(client, frag_1, sizeof(frag_1), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_1")) + return -1; + + err = sendto(client, frag_2, sizeof(frag_2), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag_2")) + return -1; + + return 0; +} + +static int send_frags6(int client) +{ + struct sockaddr_storage saddr; + struct sockaddr *saddr_p; + socklen_t saddr_len; + int err; + + saddr_p = (struct sockaddr *)&saddr; + /* Port needs to be set to 0 for raw ipv6 socket for some reason */ + err = make_sockaddr(AF_INET6, VETH1_ADDR6, 0, &saddr, &saddr_len); + if (!ASSERT_OK(err, "make_sockaddr")) + return -1; + + err = sendto(client, frag6_0, sizeof(frag6_0), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_0")) + return -1; + + err = sendto(client, frag6_1, sizeof(frag6_1), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_1")) + return -1; + + err = sendto(client, frag6_2, sizeof(frag6_2), 0, saddr_p, saddr_len); + if (!ASSERT_GE(err, 0, "sendto frag6_2")) + return -1; + + return 0; +} + +void test_bpf_ip_check_defrag_ok(bool ipv6) +{ + struct network_helper_opts rx_opts = { + .timeout_ms = 1000, + .noconnect = true, + }; + struct network_helper_opts tx_ops = { + .timeout_ms = 1000, + .type = SOCK_RAW, + .proto = IPPROTO_RAW, + .noconnect = true, + }; + struct sockaddr_storage caddr; + struct ip_check_defrag *skel; + struct nstoken *nstoken; + int client_tx_fd = -1; + int client_rx_fd = -1; + socklen_t caddr_len; + int srv_fd = -1; + char buf[1024]; + int len, err; + + skel = ip_check_defrag__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(setup_topology(ipv6), "setup_topology")) + goto out; + + if (!ASSERT_OK(attach(skel, ipv6), "attach")) + goto out; + + /* Start server in ns1 */ + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "setns ns1")) + goto out; + srv_fd = start_server(ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0); + close_netns(nstoken); + if (!ASSERT_GE(srv_fd, 0, "start_server")) + goto out; + + /* Open tx raw socket in ns0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops); + close_netns(nstoken); + if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) + goto out; + + /* Open rx socket in ns0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts); + close_netns(nstoken); + if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) + goto out; + + /* Bind rx socket to a premeditated port */ + memset(&caddr, 0, sizeof(caddr)); + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns ns0")) + goto out; + if (ipv6) { + struct sockaddr_in6 *c = (struct sockaddr_in6 *)&caddr; + + c->sin6_family = AF_INET6; + inet_pton(AF_INET6, VETH0_ADDR6, &c->sin6_addr); + c->sin6_port = htons(CLIENT_PORT); + err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); + } else { + struct sockaddr_in *c = (struct sockaddr_in *)&caddr; + + c->sin_family = AF_INET; + inet_pton(AF_INET, VETH0_ADDR, &c->sin_addr); + c->sin_port = htons(CLIENT_PORT); + err = bind(client_rx_fd, (struct sockaddr *)c, sizeof(*c)); + } + close_netns(nstoken); + if (!ASSERT_OK(err, "bind")) + goto out; + + /* Send message in fragments */ + if (ipv6) { + if (!ASSERT_OK(send_frags6(client_tx_fd), "send_frags6")) + goto out; + } else { + if (!ASSERT_OK(send_frags(client_tx_fd), "send_frags")) + goto out; + } + + if (!ASSERT_EQ(skel->bss->shootdowns, 0, "shootdowns")) + goto out; + + /* Receive reassembled msg on server and echo back to client */ + caddr_len = sizeof(caddr); + len = recvfrom(srv_fd, buf, sizeof(buf), 0, (struct sockaddr *)&caddr, &caddr_len); + if (!ASSERT_GE(len, 0, "server recvfrom")) + goto out; + len = sendto(srv_fd, buf, len, 0, (struct sockaddr *)&caddr, caddr_len); + if (!ASSERT_GE(len, 0, "server sendto")) + goto out; + + /* Expect reassembed message to be echoed back */ + len = recvfrom(client_rx_fd, buf, sizeof(buf), 0, NULL, NULL); + if (!ASSERT_EQ(len, sizeof(MAGIC_MESSAGE) - 1, "client short read")) + goto out; + +out: + if (client_rx_fd != -1) + close(client_rx_fd); + if (client_tx_fd != -1) + close(client_tx_fd); + if (srv_fd != -1) + close(srv_fd); + cleanup_topology(); + ip_check_defrag__destroy(skel); +} + +void test_bpf_ip_check_defrag(void) +{ + if (test__start_subtest("v4")) + test_bpf_ip_check_defrag_ok(false); + if (test__start_subtest("v6")) + test_bpf_ip_check_defrag_ok(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c new file mode 100644 index 000000000000..375677c19146 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_ldsx_insn.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. 
and affiliates.*/ + +#include <test_progs.h> +#include <network_helpers.h> +#include "test_ldsx_insn.skel.h" + +static void test_map_val_and_probed_memory(void) +{ + struct test_ldsx_insn *skel; + int err; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + return; + + if (skel->rodata->skip) { + test__skip(); + goto out; + } + + bpf_program__set_autoload(skel->progs.rdonly_map_prog, true); + bpf_program__set_autoload(skel->progs.map_val_prog, true); + bpf_program__set_autoload(skel->progs.test_ptr_struct_arg, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto out; + + err = test_ldsx_insn__attach(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__attach")) + goto out; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + + ASSERT_EQ(skel->bss->done1, 1, "done1"); + ASSERT_EQ(skel->bss->ret1, 1, "ret1"); + ASSERT_EQ(skel->bss->done2, 1, "done2"); + ASSERT_EQ(skel->bss->ret2, 1, "ret2"); + ASSERT_EQ(skel->bss->int_member, -1, "int_member"); + +out: + test_ldsx_insn__destroy(skel); +} + +static void test_ctx_member_sign_ext(void) +{ + struct test_ldsx_insn *skel; + int err, fd, cgroup_fd; + char buf[16] = {0}; + socklen_t optlen; + + cgroup_fd = test__join_cgroup("/ldsx_test"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /ldsx_test")) + return; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + goto close_cgroup_fd; + + if (skel->rodata->skip) { + test__skip(); + goto destroy_skel; + } + + bpf_program__set_autoload(skel->progs._getsockopt, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto destroy_skel; + + skel->links._getsockopt = + bpf_program__attach_cgroup(skel->progs._getsockopt, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links._getsockopt, "getsockopt_link")) + goto destroy_skel; + + fd = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(fd, 0, "socket")) + goto destroy_skel; + + optlen = sizeof(buf); + (void)getsockopt(fd, SOL_IP, IP_TTL, buf, &optlen); + + ASSERT_EQ(skel->bss->set_optlen, -1, "optlen"); + ASSERT_EQ(skel->bss->set_retval, -1, "retval"); + + close(fd); +destroy_skel: + test_ldsx_insn__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} + +static void test_ctx_member_narrow_sign_ext(void) +{ + struct test_ldsx_insn *skel; + struct __sk_buff skb = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + int err, prog_fd; + + skel = test_ldsx_insn__open(); + if (!ASSERT_OK_PTR(skel, "test_ldsx_insn__open")) + return; + + if (skel->rodata->skip) { + test__skip(); + goto out; + } + + bpf_program__set_autoload(skel->progs._tc, true); + + err = test_ldsx_insn__load(skel); + if (!ASSERT_OK(err, "test_ldsx_insn__load")) + goto out; + + prog_fd = bpf_program__fd(skel->progs._tc); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + + ASSERT_EQ(skel->bss->set_mark, -2, "set_mark"); + +out: + test_ldsx_insn__destroy(skel); +} + +void test_ldsx_insn(void) +{ + if (test__start_subtest("map_val and probed_memory")) + test_map_val_and_probed_memory(); + if (test__start_subtest("ctx_member_sign_ext")) + test_ctx_member_sign_ext(); + if (test__start_subtest("ctx_member_narrow_sign_ext")) + test_ctx_member_narrow_sign_ext(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index c375e59ff28d..e3e68c97b40c 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -11,6 +11,7 @@ #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_bpf_get_stack.skel.h" +#include "verifier_bswap.skel.h" #include "verifier_btf_ctx_access.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" @@ -24,6 +25,7 @@ #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" +#include "verifier_gotol.skel.h" #include "verifier_helper_access_var_len.skel.h" #include "verifier_helper_packet_access.skel.h" #include "verifier_helper_restricted.skel.h" @@ -31,6 +33,7 @@ #include "verifier_int_ptr.skel.h" #include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" +#include "verifier_ldsx.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_loops1.skel.h" #include "verifier_lwt.skel.h" @@ -40,6 +43,7 @@ #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" +#include "verifier_movsx.skel.h" #include "verifier_netfilter_ctx.skel.h" #include "verifier_netfilter_retcode.skel.h" #include "verifier_prevent_map_lookup.skel.h" @@ -51,6 +55,7 @@ #include "verifier_ringbuf.skel.h" #include "verifier_runtime_jit.skel.h" #include "verifier_scalar_ids.skel.h" +#include "verifier_sdiv.skel.h" #include "verifier_search_pruning.skel.h" #include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" @@ -113,6 +118,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_bswap(void) { RUN(verifier_bswap); } void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } @@ -126,6 +132,7 @@ void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_acces void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } +void test_verifier_gotol(void) { RUN(verifier_gotol); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } @@ -133,6 +140,7 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } +void test_verifier_ldsx(void) { RUN(verifier_ldsx); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_loops1(void) { RUN(verifier_loops1); } void test_verifier_lwt(void) { RUN(verifier_lwt); } @@ -142,6 +150,7 @@ void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void 
test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } +void test_verifier_movsx(void) { RUN(verifier_movsx); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } @@ -153,6 +162,7 @@ void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); } +void test_verifier_sdiv(void) { RUN(verifier_sdiv); } void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index fa3cac5488f5..e6bcb6051402 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +#include "test_xdp_attach_fail.skel.h" #define IFINDEX_LO 1 #define XDP_FLAGS_REPLACE (1U << 4) @@ -85,10 +86,74 @@ out_1: bpf_object__close(obj1); } +#define ERRMSG_LEN 64 + +struct xdp_errmsg { + char msg[ERRMSG_LEN]; +}; + +static void on_xdp_errmsg(void *ctx, int cpu, void *data, __u32 size) +{ + struct xdp_errmsg *ctx_errmg = ctx, *tp_errmsg = data; + + memcpy(&ctx_errmg->msg, &tp_errmsg->msg, ERRMSG_LEN); +} + +static const char tgt_errmsg[] = "Invalid XDP flags for BPF link attachment"; + +static void test_xdp_attach_fail(const char *file) +{ + struct test_xdp_attach_fail *skel = NULL; + struct xdp_errmsg errmsg = {}; + struct perf_buffer *pb = NULL; + struct bpf_object *obj = NULL; + int err, fd_xdp; + + LIBBPF_OPTS(bpf_link_create_opts, opts); + + skel = test_xdp_attach_fail__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_xdp_attach_fail__open_and_load")) + goto out_close; + + err = test_xdp_attach_fail__attach(skel); + if (!ASSERT_EQ(err, 0, "test_xdp_attach_fail__attach")) + goto out_close; + + /* set up perf buffer */ + pb = perf_buffer__new(bpf_map__fd(skel->maps.xdp_errmsg_pb), 1, + on_xdp_errmsg, NULL, &errmsg, NULL); + if (!ASSERT_OK_PTR(pb, "perf_buffer__new")) + goto out_close; + + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &fd_xdp); + if (!ASSERT_EQ(err, 0, "bpf_prog_test_load")) + goto out_close; + + opts.flags = 0xFF; // invalid flags to fail to attach XDP prog + err = bpf_link_create(fd_xdp, IFINDEX_LO, BPF_XDP, &opts); + if (!ASSERT_EQ(err, -EINVAL, "bpf_link_create")) + goto out_close; + + /* read perf buffer */ + err = perf_buffer__poll(pb, 100); + if (!ASSERT_GT(err, -1, "perf_buffer__poll")) + goto out_close; + + ASSERT_STRNEQ((const char *) errmsg.msg, tgt_errmsg, + 42 /* strlen(tgt_errmsg) */, "check error message"); + +out_close: + perf_buffer__free(pb); + bpf_object__close(obj); + test_xdp_attach_fail__destroy(skel); +} + void serial_test_xdp_attach(void) { if (test__start_subtest("xdp_attach")) test_xdp_attach("./test_xdp.bpf.o"); if (test__start_subtest("xdp_attach_dynptr")) test_xdp_attach("./test_xdp_dynptr.bpf.o"); + if (test__start_subtest("xdp_attach_failed")) + test_xdp_attach_fail("./xdp_dummy.bpf.o"); } diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c 
b/tools/testing/selftests/bpf/progs/ip_check_defrag.c new file mode 100644 index 000000000000..1c2b6c1616b0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include "bpf_tracing_net.h" + +#define NF_DROP 0 +#define NF_ACCEPT 1 +#define ETH_P_IP 0x0800 +#define ETH_P_IPV6 0x86DD +#define IP_MF 0x2000 +#define IP_OFFSET 0x1FFF +#define NEXTHDR_FRAGMENT 44 + +extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, + struct bpf_dynptr *ptr__uninit) __ksym; +extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, + void *buffer, uint32_t buffer__sz) __ksym; + +volatile int shootdowns = 0; + +static bool is_frag_v4(struct iphdr *iph) +{ + int offset; + int flags; + + offset = bpf_ntohs(iph->frag_off); + flags = offset & ~IP_OFFSET; + offset &= IP_OFFSET; + offset <<= 3; + + return (flags & IP_MF) || offset; +} + +static bool is_frag_v6(struct ipv6hdr *ip6h) +{ + /* Simplifying assumption that there are no extension headers + * between fixed header and fragmentation header. This assumption + * is only valid in this test case. It saves us the hassle of + * searching all potential extension headers. + */ + return ip6h->nexthdr == NEXTHDR_FRAGMENT; +} + +static int handle_v4(struct sk_buff *skb) +{ + struct bpf_dynptr ptr; + u8 iph_buf[20] = {}; + struct iphdr *iph; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) + return NF_DROP; + + iph = bpf_dynptr_slice(&ptr, 0, iph_buf, sizeof(iph_buf)); + if (!iph) + return NF_DROP; + + /* Shootdown any frags */ + if (is_frag_v4(iph)) { + shootdowns++; + return NF_DROP; + } + + return NF_ACCEPT; +} + +static int handle_v6(struct sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ipv6hdr *ip6h; + u8 ip6h_buf[40] = {}; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) + return NF_DROP; + + ip6h = bpf_dynptr_slice(&ptr, 0, ip6h_buf, sizeof(ip6h_buf)); + if (!ip6h) + return NF_DROP; + + /* Shootdown any frags */ + if (is_frag_v6(ip6h)) { + shootdowns++; + return NF_DROP; + } + + return NF_ACCEPT; +} + +SEC("netfilter") +int defrag(struct bpf_nf_ctx *ctx) +{ + struct sk_buff *skb = ctx->skb; + + switch (bpf_ntohs(skb->protocol)) { + case ETH_P_IP: + return handle_v4(skb); + case ETH_P_IPV6: + return handle_v6(skb); + default: + return NF_ACCEPT; + } +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_assign_reuse.c b/tools/testing/selftests/bpf/progs/test_assign_reuse.c new file mode 100644 index 000000000000..4f2e2321ea06 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_assign_reuse.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include <stdbool.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <bpf/bpf_endian.h> +#include <bpf/bpf_helpers.h> +#include <linux/pkt_cls.h> + +char LICENSE[] SEC("license") = "GPL"; + +__u64 sk_cookie_seen; +__u64 reuseport_executed; +union { + struct tcphdr tcp; + struct udphdr udp; +} headers; + +const volatile __u16 dest_port; + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} sk_map SEC(".maps"); + +SEC("sk_reuseport") +int reuse_accept(struct sk_reuseport_md *ctx) +{ + reuseport_executed++; + + if (ctx->ip_protocol == IPPROTO_TCP) { + if 
(ctx->data + sizeof(headers.tcp) > ctx->data_end) + return SK_DROP; + + if (__builtin_memcmp(&headers.tcp, ctx->data, sizeof(headers.tcp)) != 0) + return SK_DROP; + } else if (ctx->ip_protocol == IPPROTO_UDP) { + if (ctx->data + sizeof(headers.udp) > ctx->data_end) + return SK_DROP; + + if (__builtin_memcmp(&headers.udp, ctx->data, sizeof(headers.udp)) != 0) + return SK_DROP; + } else { + return SK_DROP; + } + + sk_cookie_seen = bpf_get_socket_cookie(ctx->sk); + return SK_PASS; +} + +SEC("sk_reuseport") +int reuse_drop(struct sk_reuseport_md *ctx) +{ + reuseport_executed++; + sk_cookie_seen = 0; + return SK_DROP; +} + +static int +assign_sk(struct __sk_buff *skb) +{ + int zero = 0, ret = 0; + struct bpf_sock *sk; + + sk = bpf_map_lookup_elem(&sk_map, &zero); + if (!sk) + return TC_ACT_SHOT; + ret = bpf_sk_assign(skb, sk, 0); + bpf_sk_release(sk); + return ret ? TC_ACT_SHOT : TC_ACT_OK; +} + +static bool +maybe_assign_tcp(struct __sk_buff *skb, struct tcphdr *th) +{ + if (th + 1 > (void *)(long)(skb->data_end)) + return TC_ACT_SHOT; + + if (!th->syn || th->ack || th->dest != bpf_htons(dest_port)) + return TC_ACT_OK; + + __builtin_memcpy(&headers.tcp, th, sizeof(headers.tcp)); + return assign_sk(skb); +} + +static bool +maybe_assign_udp(struct __sk_buff *skb, struct udphdr *uh) +{ + if (uh + 1 > (void *)(long)(skb->data_end)) + return TC_ACT_SHOT; + + if (uh->dest != bpf_htons(dest_port)) + return TC_ACT_OK; + + __builtin_memcpy(&headers.udp, uh, sizeof(headers.udp)); + return assign_sk(skb); +} + +SEC("tc") +int tc_main(struct __sk_buff *skb) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct ethhdr *eth; + + eth = (struct ethhdr *)(data); + if (eth + 1 > data_end) + return TC_ACT_SHOT; + + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth)); + + if (iph + 1 > data_end) + return TC_ACT_SHOT; + + if (iph->protocol == IPPROTO_TCP) + return maybe_assign_tcp(skb, (struct tcphdr *)(iph + 1)); + else if (iph->protocol == IPPROTO_UDP) + return maybe_assign_udp(skb, (struct udphdr *)(iph + 1)); + else + return TC_ACT_SHOT; + } else { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth)); + + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + + if (ip6h->nexthdr == IPPROTO_TCP) + return maybe_assign_tcp(skb, (struct tcphdr *)(ip6h + 1)); + else if (ip6h->nexthdr == IPPROTO_UDP) + return maybe_assign_udp(skb, (struct udphdr *)(ip6h + 1)); + else + return TC_ACT_SHOT; + } +} diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h index 76eab0aacba0..233b089d1fba 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h @@ -12,6 +12,15 @@ #include <linux/ipv6.h> #include <linux/udp.h> +/* offsetof() is used in static asserts, and the libbpf-redefined CO-RE + * friendly version breaks compilation for older clang versions <= 15 + * when invoked in a static assert. Restore original here. 
+ */ +#ifdef offsetof +#undef offsetof +#define offsetof(type, member) __builtin_offsetof(type, member) +#endif + struct gre_base_hdr { uint16_t flags; uint16_t protocol; diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c new file mode 100644 index 000000000000..321abf862801 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 +const volatile int skip = 0; +#else +const volatile int skip = 1; +#endif + +volatile const short val1 = -1; +volatile const int val2 = -1; +short val3 = -1; +int val4 = -1; +int done1, done2, ret1, ret2; + +SEC("?raw_tp/sys_enter") +int rdonly_map_prog(const void *ctx) +{ + if (done1) + return 0; + + done1 = 1; + /* val1/val2 readonly map */ + if (val1 == val2) + ret1 = 1; + return 0; + +} + +SEC("?raw_tp/sys_enter") +int map_val_prog(const void *ctx) +{ + if (done2) + return 0; + + done2 = 1; + /* val1/val2 regular read/write map */ + if (val3 == val4) + ret2 = 1; + return 0; + +} + +struct bpf_testmod_struct_arg_1 { + int a; +}; + +long long int_member; + +SEC("?fentry/bpf_testmod_test_arg_ptr_to_struct") +int BPF_PROG2(test_ptr_struct_arg, struct bpf_testmod_struct_arg_1 *, p) +{ + /* probed memory access */ + int_member = p->a; + return 0; +} + +long long set_optlen, set_retval; + +SEC("?cgroup/getsockopt") +int _getsockopt(volatile struct bpf_sockopt *ctx) +{ + int old_optlen, old_retval; + + old_optlen = ctx->optlen; + old_retval = ctx->retval; + + ctx->optlen = -1; + ctx->retval = -1; + + /* sign extension for ctx member */ + set_optlen = ctx->optlen; + set_retval = ctx->retval; + + ctx->optlen = old_optlen; + ctx->retval = old_retval; + + return 0; +} + +long long set_mark; + +SEC("?tc") +int _tc(volatile struct __sk_buff *skb) +{ + long long tmp_mark; + int old_mark; + + old_mark = skb->mark; + + skb->mark = 0xf6fe; + + /* narrowed sign extension for ctx member */ +#if __clang_major__ >= 18 + /* force narrow one-byte signed load. Otherwise, compiler may + * generate a 32-bit unsigned load followed by an s8 movsx. + */ + asm volatile ("r1 = *(s8 *)(%[ctx] + %[off_mark])\n\t" + "%[tmp_mark] = r1" + : [tmp_mark]"=r"(tmp_mark) + : [ctx]"r"(skb), + [off_mark]"i"(offsetof(struct __sk_buff, mark)) + : "r1"); +#else + tmp_mark = (char)skb->mark; +#endif + set_mark = tmp_mark; + + skb->mark = old_mark; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c b/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c new file mode 100644 index 000000000000..2ff1b596e87e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_attach_fail.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Leon Hwang */ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +#define ERRMSG_LEN 64 + +struct xdp_errmsg { + char msg[ERRMSG_LEN]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __type(key, int); + __type(value, int); +} xdp_errmsg_pb SEC(".maps"); + +struct xdp_attach_error_ctx { + unsigned long unused; + + /* + * bpf does not support tracepoint __data_loc directly. + * + * Actually, this field is a 32 bit integer whose value encodes + * information on where to find the actual data. 
The first 2 bytes is + * the size of the data. The last 2 bytes is the offset from the start + * of the tracepoint struct where the data begins. + * -- https://github.com/iovisor/bpftrace/pull/1542 + */ + __u32 msg; // __data_loc char[] msg; +}; + +/* + * Catch the error message at the tracepoint. + */ + +SEC("tp/xdp/bpf_xdp_link_attach_failed") +int tp__xdp__bpf_xdp_link_attach_failed(struct xdp_attach_error_ctx *ctx) +{ + char *msg = (void *)(__u64) ((void *) ctx + (__u16) ctx->msg); + struct xdp_errmsg errmsg = {}; + + bpf_probe_read_kernel_str(&errmsg.msg, ERRMSG_LEN, msg); + bpf_perf_event_output(ctx, &xdp_errmsg_pb, BPF_F_CURRENT_CPU, &errmsg, + ERRMSG_LEN); + return 0; +} + +/* + * Reuse the XDP program in xdp_dummy.c. + */ + +char LICENSE[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c new file mode 100644 index 000000000000..724bb38988b5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 + +SEC("socket") +__description("BSWAP, 16") +__success __success_unpriv __retval(0x23ff) +__naked void bswap_16(void) +{ + asm volatile (" \ + r0 = 0xff23; \ + r0 = bswap16 r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("BSWAP, 32") +__success __success_unpriv __retval(0x23ff0000) +__naked void bswap_32(void) +{ + asm volatile (" \ + r0 = 0xff23; \ + r0 = bswap32 r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("BSWAP, 64") +__success __success_unpriv __retval(0x34ff12ff) +__naked void bswap_64(void) +{ + asm volatile (" \ + r0 = %[u64_val] ll; \ + r0 = bswap64 r0; \ + exit; \ +" : + : [u64_val]"i"(0xff12ff34ff56ff78ull) + : __clobber_all); +} + +#else + +SEC("socket") +__description("cpuv4 is not supported by compiler or jit, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c new file mode 100644 index 000000000000..ce48f7757db2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 + +SEC("socket") +__description("gotol, small_imm") +__success __success_unpriv __retval(1) +__naked void gotol_small_imm(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 == 0 goto l0_%=; \ + gotol l1_%=; \ +l2_%=: \ + gotol l3_%=; \ +l1_%=: \ + r0 = 1; \ + gotol l2_%=; \ +l0_%=: \ + r0 = 2; \ +l3_%=: \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +#else + +SEC("socket") +__description("cpuv4 is not supported by compiler or jit, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c new file mode 100644 index 000000000000..3c3d1bddd67f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if 
defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 + +SEC("socket") +__description("LDSX, S8") +__success __success_unpriv __retval(-2) +__naked void ldsx_s8(void) +{ + asm volatile (" \ + r1 = 0x3fe; \ + *(u64 *)(r10 - 8) = r1; \ + r0 = *(s8 *)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("LDSX, S16") +__success __success_unpriv __retval(-2) +__naked void ldsx_s16(void) +{ + asm volatile (" \ + r1 = 0x3fffe; \ + *(u64 *)(r10 - 8) = r1; \ + r0 = *(s16 *)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("LDSX, S32") +__success __success_unpriv __retval(-1) +__naked void ldsx_s32(void) +{ + asm volatile (" \ + r1 = 0xfffffffe; \ + *(u64 *)(r10 - 8) = r1; \ + r0 = *(s32 *)(r10 - 8); \ + r0 >>= 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("LDSX, S8 range checking, privileged") +__log_level(2) __success __retval(1) +__msg("R1_w=scalar(smin=-128,smax=127)") +__naked void ldsx_s8_range_priv(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + *(u64 *)(r10 - 8) = r0; \ + r1 = *(s8 *)(r10 - 8); \ + /* r1 with s8 range */ \ + if r1 s> 0x7f goto l0_%=; \ + if r1 s< -0x80 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("LDSX, S16 range checking") +__success __success_unpriv __retval(1) +__naked void ldsx_s16_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + *(u64 *)(r10 - 8) = r0; \ + r1 = *(s16 *)(r10 - 8); \ + /* r1 with s16 range */ \ + if r1 s> 0x7fff goto l0_%=; \ + if r1 s< -0x8000 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("LDSX, S32 range checking") +__success __success_unpriv __retval(1) +__naked void ldsx_s32_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + *(u64 *)(r10 - 8) = r0; \ + r1 = *(s32 *)(r10 - 8); \ + /* r1 with s16 range */ \ + if r1 s> 0x7fffFFFF goto l0_%=; \ + if r1 s< -0x80000000 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +#else + +SEC("socket") +__description("cpuv4 is not supported by compiler or jit, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c new file mode 100644 index 000000000000..9568089932d7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 + +SEC("socket") +__description("MOV32SX, S8") +__success __success_unpriv __retval(0x23) +__naked void mov32sx_s8(void) +{ + asm volatile (" \ + w0 = 0xff23; \ + w0 = (s8)w0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOV32SX, S16") +__success __success_unpriv __retval(0xFFFFff23) +__naked void mov32sx_s16(void) +{ + asm volatile (" \ + w0 = 0xff23; \ + w0 = (s16)w0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOV64SX, S8") +__success __success_unpriv __retval(-2) +__naked void mov64sx_s8(void) +{ + asm volatile (" \ + r0 = 0x1fe; \ + r0 = (s8)r0; \ + exit; \ +" ::: 
__clobber_all); +} + +SEC("socket") +__description("MOV64SX, S16") +__success __success_unpriv __retval(0xf23) +__naked void mov64sx_s16(void) +{ + asm volatile (" \ + r0 = 0xf0f23; \ + r0 = (s16)r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOV64SX, S32") +__success __success_unpriv __retval(-1) +__naked void mov64sx_s32(void) +{ + asm volatile (" \ + r0 = 0xfffffffe; \ + r0 = (s32)r0; \ + r0 >>= 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOV32SX, S8, range_check") +__success __success_unpriv __retval(1) +__naked void mov32sx_s8_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = (s8)w0; \ + /* w1 with s8 range */ \ + if w1 s> 0x7f goto l0_%=; \ + if w1 s< -0x80 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("MOV32SX, S16, range_check") +__success __success_unpriv __retval(1) +__naked void mov32sx_s16_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + w1 = (s16)w0; \ + /* w1 with s16 range */ \ + if w1 s> 0x7fff goto l0_%=; \ + if w1 s< -0x80ff goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("MOV32SX, S16, range_check 2") +__success __success_unpriv __retval(1) +__naked void mov32sx_s16_range_2(void) +{ + asm volatile (" \ + r1 = 65535; \ + w2 = (s16)w1; \ + r2 >>= 1; \ + if r2 != 0x7fffFFFF goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 0; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("MOV64SX, S8, range_check") +__success __success_unpriv __retval(1) +__naked void mov64sx_s8_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = (s8)r0; \ + /* r1 with s8 range */ \ + if r1 s> 0x7f goto l0_%=; \ + if r1 s< -0x80 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("MOV64SX, S16, range_check") +__success __success_unpriv __retval(1) +__naked void mov64sx_s16_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = (s16)r0; \ + /* r1 with s16 range */ \ + if r1 s> 0x7fff goto l0_%=; \ + if r1 s< -0x8000 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("MOV64SX, S32, range_check") +__success __success_unpriv __retval(1) +__naked void mov64sx_s32_range(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = (s32)r0; \ + /* r1 with s32 range */ \ + if r1 s> 0x7fffffff goto l0_%=; \ + if r1 s< -0x80000000 goto l0_%=; \ + r0 = 1; \ +l1_%=: \ + exit; \ +l0_%=: \ + r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +#else + +SEC("socket") +__description("cpuv4 is not supported by compiler or jit, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c new file mode 100644 index 000000000000..f61a9a1058c8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c @@ -0,0 +1,781 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include 
<bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if defined(__TARGET_ARCH_x86) && __clang_major__ >= 18 + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 1") +__success __success_unpriv __retval(-20) +__naked void sdiv32_non_zero_imm_1(void) +{ + asm volatile (" \ + w0 = -41; \ + w0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 2") +__success __success_unpriv __retval(-20) +__naked void sdiv32_non_zero_imm_2(void) +{ + asm volatile (" \ + w0 = 41; \ + w0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 3") +__success __success_unpriv __retval(20) +__naked void sdiv32_non_zero_imm_3(void) +{ + asm volatile (" \ + w0 = -41; \ + w0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 4") +__success __success_unpriv __retval(-21) +__naked void sdiv32_non_zero_imm_4(void) +{ + asm volatile (" \ + w0 = -42; \ + w0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 5") +__success __success_unpriv __retval(-21) +__naked void sdiv32_non_zero_imm_5(void) +{ + asm volatile (" \ + w0 = 42; \ + w0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 6") +__success __success_unpriv __retval(21) +__naked void sdiv32_non_zero_imm_6(void) +{ + asm volatile (" \ + w0 = -42; \ + w0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 7") +__success __success_unpriv __retval(21) +__naked void sdiv32_non_zero_imm_7(void) +{ + asm volatile (" \ + w0 = 42; \ + w0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero imm divisor, check 8") +__success __success_unpriv __retval(20) +__naked void sdiv32_non_zero_imm_8(void) +{ + asm volatile (" \ + w0 = 41; \ + w0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 1") +__success __success_unpriv __retval(-20) +__naked void sdiv32_non_zero_reg_1(void) +{ + asm volatile (" \ + w0 = -41; \ + w1 = 2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 2") +__success __success_unpriv __retval(-20) +__naked void sdiv32_non_zero_reg_2(void) +{ + asm volatile (" \ + w0 = 41; \ + w1 = -2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 3") +__success __success_unpriv __retval(20) +__naked void sdiv32_non_zero_reg_3(void) +{ + asm volatile (" \ + w0 = -41; \ + w1 = -2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 4") +__success __success_unpriv __retval(-21) +__naked void sdiv32_non_zero_reg_4(void) +{ + asm volatile (" \ + w0 = -42; \ + w1 = 2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 5") +__success __success_unpriv __retval(-21) +__naked void sdiv32_non_zero_reg_5(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = -2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 6") +__success __success_unpriv __retval(21) +__naked void sdiv32_non_zero_reg_6(void) +{ + asm volatile (" \ + w0 = -42; \ + w1 = -2; \ + w0 s/= w1; 
\ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 7") +__success __success_unpriv __retval(21) +__naked void sdiv32_non_zero_reg_7(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, non-zero reg divisor, check 8") +__success __success_unpriv __retval(20) +__naked void sdiv32_non_zero_reg_8(void) +{ + asm volatile (" \ + w0 = 41; \ + w1 = 2; \ + w0 s/= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 1") +__success __success_unpriv __retval(-20) +__naked void sdiv64_non_zero_imm_1(void) +{ + asm volatile (" \ + r0 = -41; \ + r0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 2") +__success __success_unpriv __retval(-20) +__naked void sdiv64_non_zero_imm_2(void) +{ + asm volatile (" \ + r0 = 41; \ + r0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 3") +__success __success_unpriv __retval(20) +__naked void sdiv64_non_zero_imm_3(void) +{ + asm volatile (" \ + r0 = -41; \ + r0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 4") +__success __success_unpriv __retval(-21) +__naked void sdiv64_non_zero_imm_4(void) +{ + asm volatile (" \ + r0 = -42; \ + r0 s/= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 5") +__success __success_unpriv __retval(-21) +__naked void sdiv64_non_zero_imm_5(void) +{ + asm volatile (" \ + r0 = 42; \ + r0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero imm divisor, check 6") +__success __success_unpriv __retval(21) +__naked void sdiv64_non_zero_imm_6(void) +{ + asm volatile (" \ + r0 = -42; \ + r0 s/= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 1") +__success __success_unpriv __retval(-20) +__naked void sdiv64_non_zero_reg_1(void) +{ + asm volatile (" \ + r0 = -41; \ + r1 = 2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 2") +__success __success_unpriv __retval(-20) +__naked void sdiv64_non_zero_reg_2(void) +{ + asm volatile (" \ + r0 = 41; \ + r1 = -2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 3") +__success __success_unpriv __retval(20) +__naked void sdiv64_non_zero_reg_3(void) +{ + asm volatile (" \ + r0 = -41; \ + r1 = -2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 4") +__success __success_unpriv __retval(-21) +__naked void sdiv64_non_zero_reg_4(void) +{ + asm volatile (" \ + r0 = -42; \ + r1 = 2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 5") +__success __success_unpriv __retval(-21) +__naked void sdiv64_non_zero_reg_5(void) +{ + asm volatile (" \ + r0 = 42; \ + r1 = -2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, non-zero reg divisor, check 6") +__success __success_unpriv __retval(21) +__naked void sdiv64_non_zero_reg_6(void) +{ + asm volatile (" \ + r0 = -42; \ + r1 = -2; \ + r0 s/= r1; \ + exit; \ +" ::: __clobber_all); +} 
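For reference only (not part of the patch): the SDIV32/SDIV64 tests above and the SMOD32/SMOD64 tests that follow all encode the same rounding rule for the cpu v4 signed instructions, namely truncation toward zero, with the remainder taking the sign of the dividend. C99 integer division already behaves this way for non-zero divisors, so a minimal userspace sketch can reproduce the expected __retval values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Quotients truncate toward zero, so -41 / 2 is -20, not -21. */
	assert((int32_t)-41 / 2 == -20);
	assert((int32_t)41 / -2 == -20);
	assert((int32_t)-41 / -2 == 20);
	assert((int64_t)-42 / 2 == -21);

	/* Remainders keep the sign of the dividend. */
	assert((int32_t)-41 % 2 == -1);
	assert((int32_t)41 % -2 == 1);
	assert((int64_t)-41 % -2 == -1);
	assert((int64_t)-42 % -2 == 0);

	printf("truncated signed div/mod matches the selftest retvals\n");
	return 0;
}

The sketch only covers non-zero divisors; the zero-divisor cases are exercised at the end of verifier_sdiv.c and are covered in the note at the end of this diff.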
+ +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 1") +__success __success_unpriv __retval(-1) +__naked void smod32_non_zero_imm_1(void) +{ + asm volatile (" \ + w0 = -41; \ + w0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 2") +__success __success_unpriv __retval(1) +__naked void smod32_non_zero_imm_2(void) +{ + asm volatile (" \ + w0 = 41; \ + w0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 3") +__success __success_unpriv __retval(-1) +__naked void smod32_non_zero_imm_3(void) +{ + asm volatile (" \ + w0 = -41; \ + w0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 4") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_imm_4(void) +{ + asm volatile (" \ + w0 = -42; \ + w0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 5") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_imm_5(void) +{ + asm volatile (" \ + w0 = 42; \ + w0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero imm divisor, check 6") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_imm_6(void) +{ + asm volatile (" \ + w0 = -42; \ + w0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 1") +__success __success_unpriv __retval(-1) +__naked void smod32_non_zero_reg_1(void) +{ + asm volatile (" \ + w0 = -41; \ + w1 = 2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 2") +__success __success_unpriv __retval(1) +__naked void smod32_non_zero_reg_2(void) +{ + asm volatile (" \ + w0 = 41; \ + w1 = -2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 3") +__success __success_unpriv __retval(-1) +__naked void smod32_non_zero_reg_3(void) +{ + asm volatile (" \ + w0 = -41; \ + w1 = -2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 4") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_reg_4(void) +{ + asm volatile (" \ + w0 = -42; \ + w1 = 2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 5") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_reg_5(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = -2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, non-zero reg divisor, check 6") +__success __success_unpriv __retval(0) +__naked void smod32_non_zero_reg_6(void) +{ + asm volatile (" \ + w0 = -42; \ + w1 = -2; \ + w0 s%%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 1") +__success __success_unpriv __retval(-1) +__naked void smod64_non_zero_imm_1(void) +{ + asm volatile (" \ + r0 = -41; \ + r0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 2") +__success __success_unpriv __retval(1) +__naked void smod64_non_zero_imm_2(void) +{ + asm volatile (" \ + r0 = 41; \ + r0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, 
check 3") +__success __success_unpriv __retval(-1) +__naked void smod64_non_zero_imm_3(void) +{ + asm volatile (" \ + r0 = -41; \ + r0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 4") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_imm_4(void) +{ + asm volatile (" \ + r0 = -42; \ + r0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 5") +__success __success_unpriv __retval(-0) +__naked void smod64_non_zero_imm_5(void) +{ + asm volatile (" \ + r0 = 42; \ + r0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 6") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_imm_6(void) +{ + asm volatile (" \ + r0 = -42; \ + r0 s%%= -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 7") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_imm_7(void) +{ + asm volatile (" \ + r0 = 42; \ + r0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero imm divisor, check 8") +__success __success_unpriv __retval(1) +__naked void smod64_non_zero_imm_8(void) +{ + asm volatile (" \ + r0 = 41; \ + r0 s%%= 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 1") +__success __success_unpriv __retval(-1) +__naked void smod64_non_zero_reg_1(void) +{ + asm volatile (" \ + r0 = -41; \ + r1 = 2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 2") +__success __success_unpriv __retval(1) +__naked void smod64_non_zero_reg_2(void) +{ + asm volatile (" \ + r0 = 41; \ + r1 = -2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 3") +__success __success_unpriv __retval(-1) +__naked void smod64_non_zero_reg_3(void) +{ + asm volatile (" \ + r0 = -41; \ + r1 = -2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 4") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_reg_4(void) +{ + asm volatile (" \ + r0 = -42; \ + r1 = 2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 5") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_reg_5(void) +{ + asm volatile (" \ + r0 = 42; \ + r1 = -2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 6") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_reg_6(void) +{ + asm volatile (" \ + r0 = -42; \ + r1 = -2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 7") +__success __success_unpriv __retval(0) +__naked void smod64_non_zero_reg_7(void) +{ + asm volatile (" \ + r0 = 42; \ + r1 = 2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, non-zero reg divisor, check 8") +__success __success_unpriv __retval(1) +__naked void smod64_non_zero_reg_8(void) +{ + asm volatile (" \ + r0 = 41; \ + r1 = 2; \ + r0 s%%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV32, zero divisor") +__success __success_unpriv __retval(0) +__naked 
void sdiv32_zero_divisor(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = -1; \ + w2 s/= w1; \ + w0 = w2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SDIV64, zero divisor") +__success __success_unpriv __retval(0) +__naked void sdiv64_zero_divisor(void) +{ + asm volatile (" \ + r0 = 42; \ + r1 = 0; \ + r2 = -1; \ + r2 s/= r1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD32, zero divisor") +__success __success_unpriv __retval(-1) +__naked void smod32_zero_divisor(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = -1; \ + w2 s%%= w1; \ + w0 = w2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("SMOD64, zero divisor") +__success __success_unpriv __retval(-1) +__naked void smod64_zero_divisor(void) +{ + asm volatile (" \ + r0 = 42; \ + r1 = 0; \ + r2 = -1; \ + r2 s%%= r1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +#else + +SEC("socket") +__description("cpuv4 is not supported by compiler or jit, use a dummy test") +__success +int dummy_test(void) +{ + return 0; +} + +#endif + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c index 071dbc889e8c..bd928a72ad73 100644 --- a/tools/testing/selftests/bpf/verifier/basic_instr.c +++ b/tools/testing/selftests/bpf/verifier/basic_instr.c @@ -176,11 +176,11 @@ .retval = 1, }, { - "invalid 64-bit BPF_END", + "invalid 64-bit BPF_END with BPF_TO_BE", .insns = { BPF_MOV32_IMM(BPF_REG_0, 0), { - .code = BPF_ALU64 | BPF_END | BPF_TO_LE, + .code = BPF_ALU64 | BPF_END | BPF_TO_BE, .dst_reg = BPF_REG_0, .src_reg = 0, .off = 0, @@ -188,7 +188,7 @@ }, BPF_EXIT_INSN(), }, - .errstr = "unknown opcode d7", + .errstr = "unknown opcode df", .result = REJECT, }, { diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 3ff436706640..2827f2d7cf30 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -2076,7 +2076,7 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char * err = bpf_xdp_query(ifobj->ifindex, XDP_FLAGS_DRV_MODE, &query_opts); if (err) { - ksft_print_msg("Error querrying XDP capabilities\n"); + ksft_print_msg("Error querying XDP capabilities\n"); exit_with_error(-err); } if (query_opts.feature_flags & NETDEV_XDP_ACT_RX_SG) |
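A note on the two *_zero_divisor tests in verifier_sdiv.c above: unlike C, where division by zero is undefined behaviour, the cpu v4 signed instructions give it a defined result, which is what the expected return values encode (a zero quotient for SDIV, and the dividend left in place for SMOD). Below is a hedged userspace sketch of that behaviour; bpf_sdiv()/bpf_smod() are purely illustrative helper names, not kernel APIs.

#include <assert.h>
#include <stdint.h>

/* Illustrative helpers (not kernel functions): model the defined
 * zero-divisor result that sdiv64_zero_divisor/smod64_zero_divisor expect.
 */
static int64_t bpf_sdiv(int64_t dividend, int64_t divisor)
{
	return divisor ? dividend / divisor : 0;        /* SDIV: x s/= 0 yields 0 */
}

static int64_t bpf_smod(int64_t dividend, int64_t divisor)
{
	return divisor ? dividend % divisor : dividend; /* SMOD: x s%= 0 leaves x */
}

int main(void)
{
	/* Mirrors the __retval(0) and __retval(-1) annotations in the tests. */
	assert(bpf_sdiv(-1, 0) == 0);
	assert(bpf_smod(-1, 0) == -1);
	return 0;
}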