Diffstat (limited to 'tools/testing/selftests/bpf')
126 files changed, 4218 insertions, 1630 deletions
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index e9c377001f93..e2a2c46c008b 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -18,7 +18,6 @@ feature urandom_read test_sockmap test_lirc_mode2_user -test_flow_dissector flow_dissector_load test_tcpnotify_user test_libbpf diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 7eeb3cbe18c7..87551628e112 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -41,7 +41,7 @@ srctree := $(patsubst %/,%,$(dir $(srctree))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif -CFLAGS += -g $(OPT_FLAGS) -rdynamic \ +CFLAGS += -g $(OPT_FLAGS) -rdynamic -std=gnu11 \ -Wall -Werror -fno-omit-frame-pointer \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ @@ -54,21 +54,6 @@ PCAP_LIBS := $(shell $(PKG_CONFIG) --libs libpcap 2>/dev/null) LDLIBS += $(PCAP_LIBS) CFLAGS += $(PCAP_CFLAGS) -# The following tests perform type punning and they may break strict -# aliasing rules, which are exploited by both GCC and clang by default -# while optimizing. This can lead to broken programs. -progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing -progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing -progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing -progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing -progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing -progs/syscall.c-CFLAGS := -fno-strict-aliasing -progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing -progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing -progs/timer_crash.c-CFLAGS := -fno-strict-aliasing -progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing -progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing - # Some utility functions use LLVM libraries jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS) @@ -103,18 +88,6 @@ progs/btf_dump_test_case_packing.c-bpf_gcc-CFLAGS := -Wno-error progs/btf_dump_test_case_padding.c-bpf_gcc-CFLAGS := -Wno-error progs/btf_dump_test_case_syntax.c-bpf_gcc-CFLAGS := -Wno-error -# The following tests do type-punning, via the __imm_insn macro, from -# `struct bpf_insn' to long and then uses the value. This triggers an -# "is used uninitialized" warning in GCC due to strict-aliasing -# rules. 
-progs/verifier_ref_tracking.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_unpriv.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_cgroup_storage.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_ld_ind.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_map_ret_val.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_spill_fill.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_subprog_precision.c-bpf_gcc-CFLAGS := -fno-strict-aliasing -progs/verifier_uninit.c-bpf_gcc-CFLAGS := -fno-strict-aliasing endif ifneq ($(CLANG_CPUV4),) @@ -127,13 +100,10 @@ TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c) # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ - test_xdp_redirect.sh \ test_xdp_redirect_multi.sh \ - test_xdp_meta.sh \ test_tunnel.sh \ test_lwt_seg6local.sh \ test_lirc_mode2.sh \ - test_flow_dissector.sh \ test_xdp_vlan_mode_generic.sh \ test_xdp_vlan_mode_native.sh \ test_lwt_ip_encap.sh \ @@ -151,17 +121,16 @@ TEST_PROGS_EXTENDED := with_addr.sh \ with_tunnels.sh ima_setup.sh verify_sig_setup.sh \ test_xdp_vlan.sh test_bpftool.py +TEST_KMODS := bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ + bpf_test_modorder_y.ko +TEST_KMOD_TARGETS = $(addprefix $(OUTPUT)/,$(TEST_KMODS)) + # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = \ bench \ - bpf_testmod.ko \ - bpf_test_modorder_x.ko \ - bpf_test_modorder_y.ko \ - bpf_test_no_cfi.ko \ flow_dissector_load \ runqslower \ test_cpp \ - test_flow_dissector \ test_lirc_mode2_user \ veristat \ xdp_features \ @@ -184,8 +153,9 @@ override define CLEAN $(Q)$(RM) -r $(TEST_GEN_PROGS) $(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED) $(Q)$(RM) -r $(TEST_GEN_FILES) + $(Q)$(RM) -r $(TEST_KMODS) $(Q)$(RM) -r $(EXTRA_CLEAN) - $(Q)$(MAKE) -C bpf_testmod clean + $(Q)$(MAKE) -C test_kmods clean $(Q)$(MAKE) docs-clean endef @@ -203,9 +173,9 @@ ifeq ($(shell expr $(MAKE_VERSION) \>= 4.4), 1) $(let OUTPUT,$(OUTPUT)/,\ $(eval include ../../../build/Makefile.feature)) else -OUTPUT := $(OUTPUT)/ +override OUTPUT := $(OUTPUT)/ $(eval include ../../../build/Makefile.feature) -OUTPUT := $(patsubst %/,%,$(OUTPUT)) +override OUTPUT := $(patsubst %/,%,$(OUTPUT)) endif endif @@ -251,7 +221,7 @@ endif # to build individual tests. # NOTE: Semicolon at the end is critical to override lib.mk's default static # rule for binaries. -$(notdir $(TEST_GEN_PROGS) \ +$(notdir $(TEST_GEN_PROGS) $(TEST_KMODS) \ $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ; # sort removes libbpf duplicates when not cross-building @@ -305,37 +275,19 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c $< -o $@ \ $(shell $(PKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) -$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_testmod \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_testmod/bpf_testmod.ko $@ - -$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_no_cfi \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ +# This should really be a grouped target, but make versions before 4.3 don't +# support that for regular rules. 
However, pattern matching rules are implicitly +# treated as grouped even with older versions of make, so as a workaround, the +# subst() turns the rule into a pattern matching rule +$(addprefix test_kmods/,$(subst .ko,%ko,$(TEST_KMODS))): $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard test_kmods/Makefile test_kmods/*.[ch]) + $(Q)$(RM) test_kmods/*.ko test_kmods/*.mod.o # force re-compilation + $(Q)$(MAKE) $(submake_extras) -C test_kmods \ + RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@ -$(OUTPUT)/bpf_test_modorder_x.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_x/Makefile bpf_test_modorder_x/*.[ch]) +$(TEST_KMOD_TARGETS): $(addprefix test_kmods/,$(TEST_KMODS)) $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_modorder_x/bpf_test_modorder_x.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_x \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_modorder_x/bpf_test_modorder_x.ko $@ - -$(OUTPUT)/bpf_test_modorder_y.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_modorder_y/Makefile bpf_test_modorder_y/*.[ch]) - $(call msg,MOD,,$@) - $(Q)$(RM) bpf_test_modorder_y/bpf_test_modorder_y.ko # force re-compilation - $(Q)$(MAKE) $(submake_extras) -C bpf_test_modorder_y \ - RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) \ - EXTRA_CFLAGS='' EXTRA_LDFLAGS='' - $(Q)cp bpf_test_modorder_y/bpf_test_modorder_y.ko $@ + $(Q)cp test_kmods/$(@F) $@ DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool @@ -480,10 +432,10 @@ $(shell $(1) $(2) -dM -E - </dev/null | grep -E 'MIPS(EL|EB)|_MIPS_SZ(PTR|LONG) endef # Determine target endianness. -IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \ +IS_LITTLE_ENDIAN := $(shell $(CC) -dM -E - </dev/null | \ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__') -MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) -BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb) +MENDIAN:=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian) +BPF_TARGET_ENDIAN:=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb) ifneq ($(CROSS_COMPILE),) CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%)) @@ -493,6 +445,8 @@ CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ -I$(abspath $(OUTPUT)/../usr/include) \ + -std=gnu11 \ + -fno-strict-aliasing \ -Wno-compare-distinct-pointer-types # TODO: enable me -Wsign-compare @@ -760,14 +714,12 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \ json_writer.c \ flow_dissector_load.h \ ip_check_defrag_frags.h -TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ - $(OUTPUT)/bpf_test_no_cfi.ko \ - $(OUTPUT)/bpf_test_modorder_x.ko \ - $(OUTPUT)/bpf_test_modorder_y.ko \ +TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ $(OUTPUT)/uprobe_multi \ + $(TEST_KMOD_TARGETS) \ ima_setup.sh \ verify_sig_setup.sh \ $(wildcard progs/btf_dump_test_case_*.c) \ @@ -834,9 +786,12 @@ $(OUTPUT)/xdp_features: xdp_features.c $(OUTPUT)/network_helpers.o $(OUTPUT)/xdp $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ # Make sure we are able to include and link libbpf against c++. 
+CXXFLAGS += $(CFLAGS) +CXXFLAGS := $(subst -D_GNU_SOURCE=,,$(CXXFLAGS)) +CXXFLAGS := $(subst -std=gnu11,-std=gnu++11,$(CXXFLAGS)) $(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ) $(call msg,CXX,,$@) - $(Q)$(CXX) $(subst -D_GNU_SOURCE=,,$(CFLAGS)) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ + $(Q)$(CXX) $(CXXFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ # Benchmark runner $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ) @@ -894,12 +849,9 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c uprobe_multi.ld EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ - feature bpftool \ + feature bpftool $(TEST_KMOD_TARGETS) \ $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \ - no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ - bpf_test_no_cfi.ko \ - bpf_test_modorder_x.ko \ - bpf_test_modorder_y.ko \ + no_alu32 cpuv4 bpf_gcc \ liburandom_read.so) \ $(OUTPUT)/FEATURE-DUMP.selftests diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs index eb6a4fea8c79..f7f9e7088bb3 100644 --- a/tools/testing/selftests/bpf/Makefile.docs +++ b/tools/testing/selftests/bpf/Makefile.docs @@ -7,12 +7,6 @@ INSTALL ?= install RM ?= rm -f RMDIR ?= rmdir --ignore-fail-on-non-empty -ifeq ($(V),1) - Q = -else - Q = @ -endif - prefix ?= /usr/local mandir ?= $(prefix)/man man2dir = $(mandir)/man2 diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile deleted file mode 100644 index 40b25b98ad1b..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_modorder_x/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_modorder_x.ko - -obj-m += bpf_test_modorder_x.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile b/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile deleted file mode 100644 index 52c3ab9d84e2..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_modorder_y/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_modorder_y.ko - -obj-m += bpf_test_modorder_y.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile deleted file mode 100644 index ed5143b79edf..000000000000 --- a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..) 
- -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_test_no_cfi.ko - -obj-m += bpf_test_no_cfi.o - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean - diff --git a/tools/testing/selftests/bpf/bpf_testmod/Makefile b/tools/testing/selftests/bpf/bpf_testmod/Makefile deleted file mode 100644 index 15cb36c4483a..000000000000 --- a/tools/testing/selftests/bpf/bpf_testmod/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -BPF_TESTMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) -KDIR ?= $(abspath $(BPF_TESTMOD_DIR)/../../../../..) - -ifeq ($(V),1) -Q = -else -Q = @ -endif - -MODULES = bpf_testmod.ko - -obj-m += bpf_testmod.o -CFLAGS_bpf_testmod.o = -I$(src) - -all: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) modules - -clean: - +$(Q)make -C $(KDIR) M=$(BPF_TESTMOD_DIR) clean - diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 4ca84c8d9116..c378d5d07e02 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -58,6 +58,7 @@ CONFIG_MPLS=y CONFIG_MPLS_IPTUNNEL=y CONFIG_MPLS_ROUTING=y CONFIG_MPTCP=y +CONFIG_NET_ACT_GACT=y CONFIG_NET_ACT_SKBMOD=y CONFIG_NET_CLS=y CONFIG_NET_CLS_ACT=y diff --git a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c index 66191ae9863c..79c3ccadb962 100644 --- a/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c +++ b/tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c @@ -120,11 +120,12 @@ static void validate_fetch_results(int outer_map_fd, static void fetch_and_validate(int outer_map_fd, struct bpf_map_batch_opts *opts, - __u32 batch_size, bool delete_entries) + __u32 batch_size, bool delete_entries, + bool has_holes) { - __u32 *fetched_keys, *fetched_values, total_fetched = 0; - __u32 batch_key = 0, fetch_count, step_size; - int err, max_entries = OUTER_MAP_ENTRIES; + int err, max_entries = OUTER_MAP_ENTRIES - !!has_holes; + __u32 *fetched_keys, *fetched_values, total_fetched = 0, i; + __u32 batch_key = 0, fetch_count, step_size = batch_size; __u32 value_size = sizeof(__u32); /* Total entries needs to be fetched */ @@ -134,9 +135,8 @@ static void fetch_and_validate(int outer_map_fd, "Memory allocation failed for fetched_keys or fetched_values", "error=%s\n", strerror(errno)); - for (step_size = batch_size; - step_size <= max_entries; - step_size += batch_size) { + /* hash map may not always return full batch */ + for (i = 0; i < OUTER_MAP_ENTRIES; i++) { fetch_count = step_size; err = delete_entries ? 
bpf_map_lookup_and_delete_batch(outer_map_fd, @@ -155,6 +155,7 @@ static void fetch_and_validate(int outer_map_fd, if (err && errno == ENOSPC) { /* Fetch again with higher batch size */ total_fetched = 0; + step_size += batch_size; continue; } @@ -184,18 +185,19 @@ static void fetch_and_validate(int outer_map_fd, } static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, - enum bpf_map_type inner_map_type) + enum bpf_map_type inner_map_type, + bool has_holes) { + __u32 max_entries = OUTER_MAP_ENTRIES - !!has_holes; __u32 *outer_map_keys, *inner_map_fds; - __u32 max_entries = OUTER_MAP_ENTRIES; LIBBPF_OPTS(bpf_map_batch_opts, opts); __u32 value_size = sizeof(__u32); int batch_size[2] = {5, 10}; __u32 map_index, op_index; int outer_map_fd, ret; - outer_map_keys = calloc(max_entries, value_size); - inner_map_fds = calloc(max_entries, value_size); + outer_map_keys = calloc(OUTER_MAP_ENTRIES, value_size); + inner_map_fds = calloc(OUTER_MAP_ENTRIES, value_size); CHECK((!outer_map_keys || !inner_map_fds), "Memory allocation failed for outer_map_keys or inner_map_fds", "error=%s\n", strerror(errno)); @@ -209,6 +211,24 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, ((outer_map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) ? 9 : 1000) - map_index; + /* This condition is only meaningful for array of maps. + * + * max_entries == OUTER_MAP_ENTRIES - 1 if it is true. Say + * max_entries is short for n, then outer_map_keys looks like: + * + * [n, n-1, ... 2, 1] + * + * We change it to + * + * [n, n-1, ... 2, 0] + * + * So it will leave key 1 as a hole. It will serve to test the + * correctness when batch on an array: a "non-exist" key might be + * actually allocated and returned from key iteration. + */ + if (has_holes) + outer_map_keys[max_entries - 1]--; + /* batch operation - map_update */ ret = bpf_map_update_batch(outer_map_fd, outer_map_keys, inner_map_fds, &max_entries, &opts); @@ -219,15 +239,17 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, /* batch operation - map_lookup */ for (op_index = 0; op_index < 2; ++op_index) fetch_and_validate(outer_map_fd, &opts, - batch_size[op_index], false); + batch_size[op_index], false, + has_holes); /* batch operation - map_lookup_delete */ if (outer_map_type == BPF_MAP_TYPE_HASH_OF_MAPS) fetch_and_validate(outer_map_fd, &opts, - max_entries, true /*delete*/); + max_entries, true /*delete*/, + has_holes); /* close all map fds */ - for (map_index = 0; map_index < max_entries; map_index++) + for (map_index = 0; map_index < OUTER_MAP_ENTRIES; map_index++) close(inner_map_fds[map_index]); close(outer_map_fd); @@ -237,16 +259,20 @@ static void _map_in_map_batch_ops(enum bpf_map_type outer_map_type, void test_map_in_map_batch_ops_array(void) { - _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, false); printf("%s:PASS with inner ARRAY map\n", __func__); - _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, false); printf("%s:PASS with inner HASH map\n", __func__); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_ARRAY, true); + printf("%s:PASS with inner ARRAY map with holes\n", __func__); + _map_in_map_batch_ops(BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH, true); + printf("%s:PASS with inner HASH map with holes\n", __func__); } void test_map_in_map_batch_ops_hash(void) { - _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, 
BPF_MAP_TYPE_ARRAY); + _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_ARRAY, false); printf("%s:PASS with inner ARRAY map\n", __func__); - _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH); + _map_in_map_batch_ops(BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_HASH, false); printf("%s:PASS with inner HASH map\n", __func__); } diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 27784946b01b..80844a5fb1fe 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -21,7 +21,7 @@ #include <linux/limits.h> #include <linux/ip.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <netinet/tcp.h> #include <net/if.h> diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 5764155b6d25..ebec8a8d6f81 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -14,6 +14,7 @@ typedef __u16 __sum16; #include <linux/sockios.h> #include <linux/err.h> #include <netinet/tcp.h> +#include <netinet/udp.h> #include <bpf/bpf_endian.h> #include <net/if.h> @@ -105,6 +106,45 @@ static __u16 csum_fold(__u32 csum) return (__u16)~csum; } +static __wsum csum_partial(const void *buf, int len, __wsum sum) +{ + __u16 *p = (__u16 *)buf; + int num_u16 = len >> 1; + int i; + + for (i = 0; i < num_u16; i++) + sum += p[i]; + + return sum; +} + +static inline __sum16 build_ip_csum(struct iphdr *iph) +{ + __u32 sum = 0; + __u16 *p; + + iph->check = 0; + p = (void *)iph; + sum = csum_partial(p, iph->ihl << 2, 0); + + return csum_fold(sum); +} + +/** + * csum_tcpudp_magic - compute IP pseudo-header checksum + * + * Compute the IPv4 pseudo header checksum. The helper can take a + * accumulated sum from the transport layer to accumulate it and directly + * return the transport layer + * + * @saddr: IP source address + * @daddr: IP dest address + * @len: IP data size + * @proto: transport layer protocol + * @csum: The accumulated partial sum to add to the computation + * + * Returns the folded sum + */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __wsum csum) @@ -120,6 +160,21 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, return csum_fold((__u32)s); } +/** + * csum_ipv6_magic - compute IPv6 pseudo-header checksum + * + * Compute the ipv6 pseudo header checksum. 
The helper can take a + * accumulated sum from the transport layer to accumulate it and directly + * return the transport layer + * + * @saddr: IPv6 source address + * @daddr: IPv6 dest address + * @len: IPv6 data size + * @proto: transport layer protocol + * @csum: The accumulated partial sum to add to the computation + * + * Returns the folded sum + */ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, __u8 proto, @@ -139,6 +194,47 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold((__u32)s); } +/** + * build_udp_v4_csum - compute UDP checksum for UDP over IPv4 + * + * Compute the checksum to embed in UDP header, composed of the sum of IP + * pseudo-header checksum, UDP header checksum and UDP data checksum + * @iph IP header + * @udph UDP header, which must be immediately followed by UDP data + * + * Returns the total checksum + */ + +static inline __sum16 build_udp_v4_csum(const struct iphdr *iph, + const struct udphdr *udph) +{ + unsigned long sum; + + sum = csum_partial(udph, ntohs(udph->len), 0); + return csum_tcpudp_magic(iph->saddr, iph->daddr, ntohs(udph->len), + IPPROTO_UDP, sum); +} + +/** + * build_udp_v6_csum - compute UDP checksum for UDP over IPv6 + * + * Compute the checksum to embed in UDP header, composed of the sum of IPv6 + * pseudo-header checksum, UDP header checksum and UDP data checksum + * @ip6h IPv6 header + * @udph UDP header, which must be immediately followed by UDP data + * + * Returns the total checksum + */ +static inline __sum16 build_udp_v6_csum(const struct ipv6hdr *ip6h, + const struct udphdr *udph) +{ + unsigned long sum; + + sum = csum_partial(udph, ntohs(udph->len), 0); + return csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ntohs(udph->len), + IPPROTO_UDP, sum); +} + struct tmonitor_ctx; #ifdef TRAFFIC_MONITOR diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c index ca84726d5ac1..fb67ae195a73 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_distill.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -385,7 +385,7 @@ static void test_distilled_base_missing_err(void) "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); btf5 = btf__new_empty(); if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) - return; + goto cleanup; btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ VALIDATE_RAW_BTF( btf5, @@ -478,7 +478,7 @@ static void test_distilled_base_multi_err2(void) "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); btf5 = btf__new_empty(); if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) - return; + goto cleanup; btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */ VALIDATE_RAW_BTF( @@ -601,6 +601,76 @@ cleanup: btf__free(base); } +/* If a needed composite type, which is the member of composite type + * in the split BTF, has a different size in the base BTF we wish to + * relocate with, btf__relocate() should error out. 
+ */ +static void test_distilled_base_embedded_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_struct(btf1, "s1", 4); /* [2] struct s1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_struct(btf2, "with_embedded", 8); /* [3] struct with_embedded { */ + btf__add_field(btf2, "e1", 2, 0, 0); /* struct s1 e1; */ + /* } */ + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[3] STRUCT 'with_embedded' size=8 vlen=1\n" + "\t'e1' type_id=2 bits_offset=0"); + + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] STRUCT 's1' size=4 vlen=0", + "[2] STRUCT 'with_embedded' size=8 vlen=1\n" + "\t'e1' type_id=1 bits_offset=0"); + + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + goto cleanup; + + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + /* struct with the same name but different size */ + btf__add_struct(btf5, "s1", 8); /* [2] struct s1 { */ + btf__add_field(btf5, "f1", 1, 0, 0); /* int f1; */ + /* } */ + + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); +cleanup: + btf__free(btf5); + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + void test_btf_distill(void) { if (test__start_subtest("distilled_base")) @@ -613,6 +683,8 @@ void test_btf_distill(void) test_distilled_base_multi_err(); if (test__start_subtest("distilled_base_multi_err2")) test_distilled_base_multi_err2(); + if (test__start_subtest("distilled_base_embedded_err")) + test_distilled_base_embedded_err(); if (test__start_subtest("distilled_base_vmlinux")) test_distilled_base_vmlinux(); if (test__start_subtest("distilled_endianness")) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c new file mode 100644 index 000000000000..e1a90c10db8c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_direct_packet_access.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "cgroup_skb_direct_packet_access.skel.h" + +void test_cgroup_skb_prog_run_direct_packet_access(void) +{ + int err; + struct cgroup_skb_direct_packet_access *skel; + char test_skb[64] = {}; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = test_skb, + .data_size_in = sizeof(test_skb), + ); + + skel = cgroup_skb_direct_packet_access__open_and_load(); + if (!ASSERT_OK_PTR(skel, "cgroup_skb_direct_packet_access__open_and_load")) + return; + + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.direct_packet_access), &topts); + ASSERT_OK(err, "bpf_prog_test_run_opts err"); + ASSERT_EQ(topts.retval, 1, "retval"); + + ASSERT_NEQ(skel->bss->data_end, 0, "data_end"); + + 
cgroup_skb_direct_packet_access__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 1c682550e0e7..e10ea92c3fe2 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -2,7 +2,7 @@ #define _GNU_SOURCE #include <test_progs.h> #include "progs/core_reloc_types.h" -#include "bpf_testmod/bpf_testmod.h" +#include "test_kmods/bpf_testmod.h" #include <linux/limits.h> #include <sys/mman.h> #include <sys/syscall.h> diff --git a/tools/testing/selftests/bpf/prog_tests/fd_array.c b/tools/testing/selftests/bpf/prog_tests/fd_array.c new file mode 100644 index 000000000000..a1d52e73fb16 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fd_array.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> + +#include <linux/btf.h> +#include <bpf/bpf.h> + +#include "../test_btf.h" + +static inline int new_map(void) +{ + const char *name = NULL; + __u32 max_entries = 1; + __u32 value_size = 8; + __u32 key_size = 4; + + return bpf_map_create(BPF_MAP_TYPE_ARRAY, name, + key_size, value_size, + max_entries, NULL); +} + +static int new_btf(void) +{ + struct btf_blob { + struct btf_header btf_hdr; + __u32 types[8]; + __u32 str; + } raw_btf = { + .btf_hdr = { + .magic = BTF_MAGIC, + .version = BTF_VERSION, + .hdr_len = sizeof(struct btf_header), + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types), + .str_len = sizeof(raw_btf.str), + }, + .types = { + /* long */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */ + /* unsigned long */ + BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */ + }, + }; + + return bpf_btf_load(&raw_btf, sizeof(raw_btf), NULL); +} + +#define Close(FD) do { \ + if ((FD) >= 0) { \ + close(FD); \ + FD = -1; \ + } \ +} while(0) + +static bool map_exists(__u32 id) +{ + int fd; + + fd = bpf_map_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static bool btf_exists(__u32 id) +{ + int fd; + + fd = bpf_btf_get_fd_by_id(id); + if (fd >= 0) { + close(fd); + return true; + } + return false; +} + +static inline int bpf_prog_get_map_ids(int prog_fd, __u32 *nr_map_ids, __u32 *map_ids) +{ + __u32 len = sizeof(struct bpf_prog_info); + struct bpf_prog_info info; + int err; + + memset(&info, 0, len); + info.nr_map_ids = *nr_map_ids, + info.map_ids = ptr_to_u64(map_ids), + + err = bpf_prog_get_info_by_fd(prog_fd, &info, &len); + if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd")) + return -1; + + *nr_map_ids = info.nr_map_ids; + + return 0; +} + +static int __load_test_prog(int map_fd, const int *fd_array, int fd_array_cnt) +{ + /* A trivial program which uses one map */ + struct bpf_insn insns[] = { + BPF_LD_MAP_FD(BPF_REG_1, map_fd), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + + opts.fd_array = fd_array; + opts.fd_array_cnt = fd_array_cnt; + + return bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, ARRAY_SIZE(insns), &opts); +} + +static int load_test_prog(const int *fd_array, int fd_array_cnt) +{ + int map_fd; + int ret; + + map_fd = new_map(); + if (!ASSERT_GE(map_fd, 0, "new_map")) + return map_fd; + + ret = __load_test_prog(map_fd, fd_array, fd_array_cnt); + 
close(map_fd); + return ret; +} + +static bool check_expected_map_ids(int prog_fd, int expected, __u32 *map_ids, __u32 *nr_map_ids) +{ + int err; + + err = bpf_prog_get_map_ids(prog_fd, nr_map_ids, map_ids); + if (!ASSERT_OK(err, "bpf_prog_get_map_ids")) + return false; + if (!ASSERT_EQ(*nr_map_ids, expected, "unexpected nr_map_ids")) + return false; + + return true; +} + +/* + * Load a program, which uses one map. No fd_array maps are present. + * On return only one map is expected to be bound to prog. + */ +static void check_fd_array_cnt__no_fd_array(void) +{ + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + prog_fd = load_test_prog(NULL, 0); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + return; + nr_map_ids = ARRAY_SIZE(map_ids); + check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids); + close(prog_fd); +} + +/* + * Load a program, which uses one map, and pass two extra, non-equal, maps in + * fd_array with fd_array_cnt=2. On return three maps are expected to be bound + * to the program. + */ +static void check_fd_array_cnt__fd_array_ok(void) +{ + int extra_fds[2] = { -1, -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = new_map(); + if (!ASSERT_GE(extra_fds[1], 0, "new_map")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 2); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids)) + goto cleanup; + + /* maps should still exist when original file descriptors are closed */ + Close(extra_fds[0]); + Close(extra_fds[1]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map_ids[0] should exist")) + goto cleanup; + if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map_ids[1] should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[1]); + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Load a program with a few extra maps duplicated in the fd_array. + * After the load maps should only be referenced once. 
+ */ +static void check_fd_array_cnt__duplicated_maps(void) +{ + int extra_fds[4] = { -1, -1, -1, -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = extra_fds[2] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = extra_fds[3] = new_map(); + if (!ASSERT_GE(extra_fds[1], 0, "new_map")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 4); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 3, map_ids, &nr_map_ids)) + goto cleanup; + + /* maps should still exist when original file descriptors are closed */ + Close(extra_fds[0]); + Close(extra_fds[1]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist")) + goto cleanup; + if (!ASSERT_EQ(map_exists(map_ids[1]), true, "map should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[1]); + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Check that if maps which are referenced by a program are + * passed in fd_array, then they will be referenced only once + */ +static void check_fd_array_cnt__referenced_maps_in_fd_array(void) +{ + int extra_fds[1] = { -1 }; + __u32 map_ids[16]; + __u32 nr_map_ids; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + prog_fd = __load_test_prog(extra_fds[0], extra_fds, 1); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + nr_map_ids = ARRAY_SIZE(map_ids); + if (!check_expected_map_ids(prog_fd, 1, map_ids, &nr_map_ids)) + goto cleanup; + + /* map should still exist when original file descriptor is closed */ + Close(extra_fds[0]); + if (!ASSERT_EQ(map_exists(map_ids[0]), true, "map should exist")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[0]); + Close(prog_fd); +} + +static int get_btf_id_by_fd(int btf_fd, __u32 *id) +{ + struct bpf_btf_info info; + __u32 info_len = sizeof(info); + int err; + + memset(&info, 0, info_len); + err = bpf_btf_get_info_by_fd(btf_fd, &info, &info_len); + if (err) + return err; + if (id) + *id = info.id; + return 0; +} + +/* + * Check that fd_array operates properly for btfs. Namely, to check that + * passing a btf fd in fd_array increases its reference count, do the + * following: + * 1) Create a new btf, it's referenced only by a file descriptor, so refcnt=1 + * 2) Load a BPF prog with fd_array[0] = btf_fd; now btf's refcnt=2 + * 3) Close the btf_fd, now refcnt=1 + * Wait and check that BTF stil exists. + */ +static void check_fd_array_cnt__referenced_btfs(void) +{ + int extra_fds[1] = { -1 }; + int prog_fd = -1; + __u32 btf_id; + int tries; + int err; + + extra_fds[0] = new_btf(); + if (!ASSERT_GE(extra_fds[0], 0, "new_btf")) + goto cleanup; + prog_fd = load_test_prog(extra_fds, 1); + if (!ASSERT_GE(prog_fd, 0, "BPF_PROG_LOAD")) + goto cleanup; + + /* btf should still exist when original file descriptor is closed */ + err = get_btf_id_by_fd(extra_fds[0], &btf_id); + if (!ASSERT_GE(err, 0, "get_btf_id_by_fd")) + goto cleanup; + + Close(extra_fds[0]); + + if (!ASSERT_GE(kern_sync_rcu(), 0, "kern_sync_rcu 1")) + goto cleanup; + + if (!ASSERT_EQ(btf_exists(btf_id), true, "btf should exist")) + goto cleanup; + + Close(prog_fd); + + /* The program is freed by a workqueue, so no reliable + * way to sync, so just wait a bit (max ~1 second). 
*/ + for (tries = 100; tries >= 0; tries--) { + usleep(1000); + + if (!btf_exists(btf_id)) + break; + + if (tries) + continue; + + PRINT_FAIL("btf should have been freed"); + } + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[0]); + Close(prog_fd); +} + +/* + * Test that a program with trash in fd_array can't be loaded: + * only map and BTF file descriptors should be accepted. + */ +static void check_fd_array_cnt__fd_array_with_trash(void) +{ + int extra_fds[3] = { -1, -1, -1 }; + int prog_fd = -1; + + extra_fds[0] = new_map(); + if (!ASSERT_GE(extra_fds[0], 0, "new_map")) + goto cleanup; + extra_fds[1] = new_btf(); + if (!ASSERT_GE(extra_fds[1], 0, "new_btf")) + goto cleanup; + + /* trash 1: not a file descriptor */ + extra_fds[2] = 0xbeef; + prog_fd = load_test_prog(extra_fds, 3); + if (!ASSERT_EQ(prog_fd, -EBADF, "prog should have been rejected with -EBADF")) + goto cleanup; + + /* trash 2: not a map or btf */ + extra_fds[2] = socket(AF_INET, SOCK_STREAM, 0); + if (!ASSERT_GE(extra_fds[2], 0, "socket")) + goto cleanup; + + prog_fd = load_test_prog(extra_fds, 3); + if (!ASSERT_EQ(prog_fd, -EINVAL, "prog should have been rejected with -EINVAL")) + goto cleanup; + + /* Validate that the prog is ok if trash is removed */ + Close(extra_fds[2]); + extra_fds[2] = new_btf(); + if (!ASSERT_GE(extra_fds[2], 0, "new_btf")) + goto cleanup; + + prog_fd = load_test_prog(extra_fds, 3); + if (!ASSERT_GE(prog_fd, 0, "prog should have been loaded")) + goto cleanup; + + /* some fds might be invalid, so ignore return codes */ +cleanup: + Close(extra_fds[2]); + Close(extra_fds[1]); + Close(extra_fds[0]); +} + +/* + * Test that a program with too big fd_array can't be loaded. + */ +static void check_fd_array_cnt__fd_array_too_big(void) +{ + int extra_fds[65]; + int prog_fd = -1; + int i; + + for (i = 0; i < 65; i++) { + extra_fds[i] = new_map(); + if (!ASSERT_GE(extra_fds[i], 0, "new_map")) + goto cleanup_fds; + } + + prog_fd = load_test_prog(extra_fds, 65); + ASSERT_EQ(prog_fd, -E2BIG, "prog should have been rejected with -E2BIG"); + +cleanup_fds: + while (i > 0) + Close(extra_fds[--i]); +} + +void test_fd_array_cnt(void) +{ + if (test__start_subtest("no-fd-array")) + check_fd_array_cnt__no_fd_array(); + + if (test__start_subtest("fd-array-ok")) + check_fd_array_cnt__fd_array_ok(); + + if (test__start_subtest("fd-array-dup-input")) + check_fd_array_cnt__duplicated_maps(); + + if (test__start_subtest("fd-array-ref-maps-in-array")) + check_fd_array_cnt__referenced_maps_in_fd_array(); + + if (test__start_subtest("fd-array-ref-btfs")) + check_fd_array_cnt__referenced_btfs(); + + if (test__start_subtest("fd-array-trash-input")) + check_fd_array_cnt__fd_array_with_trash(); + + if (test__start_subtest("fd-array-2big")) + check_fd_array_cnt__fd_array_too_big(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index d50cbd8040d4..e59af2aa6601 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -171,6 +171,10 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, /* See also arch_adjust_kprobe_addr(). 
*/ if (skel->kconfig->CONFIG_X86_KERNEL_IBT) entry_offset = 4; + if (skel->kconfig->CONFIG_PPC64 && + skel->kconfig->CONFIG_KPROBES_ON_FTRACE && + !skel->kconfig->CONFIG_PPC_FTRACE_OUT_OF_LINE) + entry_offset = 4; err = verify_perf_link_info(link_fd, type, kprobe_addr, 0, entry_offset); ASSERT_OK(err, "verify_perf_link_info"); } else { diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index cfcc90cb7ffb..08bae13248c4 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -7,39 +7,14 @@ #include "bpf_flow.skel.h" +#define TEST_NS "flow_dissector_ns" #define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */ +#define TEST_NAME_MAX_LEN 64 #ifndef IP_MF #define IP_MF 0x2000 #endif -#define CHECK_FLOW_KEYS(desc, got, expected) \ - _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ - desc, \ - topts.duration, \ - "nhoff=%u/%u " \ - "thoff=%u/%u " \ - "addr_proto=0x%x/0x%x " \ - "is_frag=%u/%u " \ - "is_first_frag=%u/%u " \ - "is_encap=%u/%u " \ - "ip_proto=0x%x/0x%x " \ - "n_proto=0x%x/0x%x " \ - "flow_label=0x%x/0x%x " \ - "sport=%u/%u " \ - "dport=%u/%u\n", \ - got.nhoff, expected.nhoff, \ - got.thoff, expected.thoff, \ - got.addr_proto, expected.addr_proto, \ - got.is_frag, expected.is_frag, \ - got.is_first_frag, expected.is_first_frag, \ - got.is_encap, expected.is_encap, \ - got.ip_proto, expected.ip_proto, \ - got.n_proto, expected.n_proto, \ - got.flow_label, expected.flow_label, \ - got.sport, expected.sport, \ - got.dport, expected.dport) - struct ipv4_pkt { struct ethhdr eth; struct iphdr iph; @@ -89,6 +64,19 @@ struct dvlan_ipv6_pkt { struct tcphdr tcp; } __packed; +struct gre_base_hdr { + __be16 flags; + __be16 protocol; +} gre_base_hdr; + +struct gre_minimal_pkt { + struct ethhdr eth; + struct iphdr iph; + struct gre_base_hdr gre_hdr; + struct iphdr iph_inner; + struct tcphdr tcp; +} __packed; + struct test { const char *name; union { @@ -98,6 +86,7 @@ struct test { struct ipv6_pkt ipv6; struct ipv6_frag_pkt ipv6_frag; struct dvlan_ipv6_pkt dvlan_ipv6; + struct gre_minimal_pkt gre_minimal; } pkt; struct bpf_flow_keys keys; __u32 flags; @@ -106,7 +95,6 @@ struct test { #define VLAN_HLEN 4 -static __u32 duration; struct test tests[] = { { .name = "ipv4", @@ -444,8 +432,137 @@ struct test tests[] = { }, .retval = BPF_FLOW_DISSECTOR_CONTINUE, }, + { + .name = "ip-gre", + .pkt.gre_minimal = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_GRE, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .gre_hdr = { + .flags = 0, + .protocol = __bpf_constant_htons(ETH_P_IP), + }, + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES - + sizeof(struct iphdr)), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) * 2 + + sizeof(struct gre_base_hdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + .sport = 80, + .dport = 8080, + }, + .retval = BPF_OK, + }, + { + .name = "ip-gre-no-encap", + .pkt.ipip = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = IPPROTO_GRE, + .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES), + .iph_inner.ihl = 5, + .iph_inner.protocol = IPPROTO_TCP, + .iph_inner.tot_len = + __bpf_constant_htons(MAGIC_BYTES - + sizeof(struct 
iphdr)), + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct iphdr) + + sizeof(struct gre_base_hdr), + .addr_proto = ETH_P_IP, + .ip_proto = IPPROTO_GRE, + .n_proto = __bpf_constant_htons(ETH_P_IP), + .is_encap = true, + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP, + .retval = BPF_OK, + }, }; +void serial_test_flow_dissector_namespace(void) +{ + struct bpf_flow *skel; + struct nstoken *ns; + int err, prog_fd; + + skel = bpf_flow__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open/load skeleton")) + return; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (!ASSERT_OK_FD(prog_fd, "get dissector fd")) + goto out_destroy_skel; + + /* We must be able to attach a flow dissector to root namespace */ + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_OK(err, "attach on root namespace ok")) + goto out_destroy_skel; + + err = make_netns(TEST_NS); + if (!ASSERT_OK(err, "create non-root net namespace")) + goto out_destroy_skel; + + /* We must not be able to additionally attach a flow dissector to a + * non-root net namespace + */ + ns = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(ns, "enter non-root net namespace")) + goto out_clean_ns; + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_ERR(err, + "refuse new flow dissector in non-root net namespace")) + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + else + ASSERT_EQ(errno, EEXIST, + "refused because of already attached prog"); + close_netns(ns); + + /* If no flow dissector is attached to the root namespace, we must + * be able to attach one to a non-root net namespace + */ + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + ns = open_netns(TEST_NS); + ASSERT_OK_PTR(ns, "enter non-root net namespace"); + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + close_netns(ns); + ASSERT_OK(err, "accept new flow dissector in non-root net namespace"); + + /* If a flow dissector is attached to non-root net namespace, attaching + * a flow dissector to root namespace must fail + */ + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace")) + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + else + ASSERT_EQ(errno, EEXIST, + "refused because of already attached prog"); + + ns = open_netns(TEST_NS); + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); + close_netns(ns); +out_clean_ns: + remove_netns(TEST_NS); +out_destroy_skel: + bpf_flow__destroy(skel); +} + static int create_tap(const char *ifname) { struct ifreq ifr = { @@ -533,22 +650,27 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) return 0; } -static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) +static void run_tests_skb_less(int tap_fd, struct bpf_map *keys, + char *test_suffix) { + char test_name[TEST_NAME_MAX_LEN]; int i, err, keys_fd; keys_fd = bpf_map__fd(keys); - if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) + if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd")) return; for (i = 0; i < ARRAY_SIZE(tests); i++) { /* Keep in sync with 'flags' from eth_get_headlen. 
*/ __u32 eth_get_headlen_flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; - LIBBPF_OPTS(bpf_test_run_opts, topts); struct bpf_flow_keys flow_keys = {}; __u32 key = (__u32)(tests[i].keys.sport) << 16 | tests[i].keys.dport; + snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name, + test_suffix); + if (!test__start_subtest(test_name)) + continue; /* For skb-less case we can't pass input flags; run * only the tests that have a matching set of flags. @@ -558,78 +680,139 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) continue; err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); - CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); + if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap")) + continue; /* check the stored flow_keys only if BPF_OK expected */ if (tests[i].retval != BPF_OK) continue; err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); - ASSERT_OK(err, "bpf_map_lookup_elem"); + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) + continue; - CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + ASSERT_MEMEQ(&flow_keys, &tests[i].keys, + sizeof(struct bpf_flow_keys), + "returned flow keys"); err = bpf_map_delete_elem(keys_fd, &key); ASSERT_OK(err, "bpf_map_delete_elem"); } } -static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd) +void test_flow_dissector_skb_less_direct_attach(void) { - int err, prog_fd; + int err, prog_fd, tap_fd; + struct bpf_flow *skel; + struct netns_obj *ns; - prog_fd = bpf_program__fd(skel->progs._dissect); - if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) + ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true); + if (!ASSERT_OK_PTR(ns, "create and open netns")) return; + skel = bpf_flow__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open/load skeleton")) + goto out_clean_ns; + + err = init_prog_array(skel->obj, skel->maps.jmp_table); + if (!ASSERT_OK(err, "init_prog_array")) + goto out_destroy_skel; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd")) + goto out_destroy_skel; + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); - if (CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno)) - return; + if (!ASSERT_OK(err, "bpf_prog_attach")) + goto out_destroy_skel; + + tap_fd = create_tap("tap0"); + if (!ASSERT_OK_FD(tap_fd, "create_tap")) + goto out_destroy_skel; + err = ifup("tap0"); + if (!ASSERT_OK(err, "ifup")) + goto out_close_tap; - run_tests_skb_less(tap_fd, skel->maps.last_dissection); + run_tests_skb_less(tap_fd, skel->maps.last_dissection, + "non-skb-direct-attach"); err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); - CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno); + ASSERT_OK(err, "bpf_prog_detach2"); + +out_close_tap: + close(tap_fd); +out_destroy_skel: + bpf_flow__destroy(skel); +out_clean_ns: + netns_free(ns); } -static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd) +void test_flow_dissector_skb_less_indirect_attach(void) { + int err, net_fd, tap_fd; + struct bpf_flow *skel; struct bpf_link *link; - int err, net_fd; + struct netns_obj *ns; - net_fd = open("/proc/self/ns/net", O_RDONLY); - if (CHECK(net_fd < 0, "open(/proc/self/ns/net)", "err %d\n", errno)) + ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true); + if (!ASSERT_OK_PTR(ns, "create and open netns")) return; + skel = bpf_flow__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open/load skeleton")) + goto out_clean_ns; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if 
(!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net")) + goto out_destroy_skel; + + err = init_prog_array(skel->obj, skel->maps.jmp_table); + if (!ASSERT_OK(err, "init_prog_array")) + goto out_destroy_skel; + + tap_fd = create_tap("tap0"); + if (!ASSERT_OK_FD(tap_fd, "create_tap")) + goto out_close_ns; + err = ifup("tap0"); + if (!ASSERT_OK(err, "ifup")) + goto out_close_tap; + link = bpf_program__attach_netns(skel->progs._dissect, net_fd); if (!ASSERT_OK_PTR(link, "attach_netns")) - goto out_close; + goto out_close_tap; - run_tests_skb_less(tap_fd, skel->maps.last_dissection); + run_tests_skb_less(tap_fd, skel->maps.last_dissection, + "non-skb-indirect-attach"); err = bpf_link__destroy(link); - CHECK(err, "bpf_link__destroy", "err %d\n", err); -out_close: + ASSERT_OK(err, "bpf_link__destroy"); + +out_close_tap: + close(tap_fd); +out_close_ns: close(net_fd); +out_destroy_skel: + bpf_flow__destroy(skel); +out_clean_ns: + netns_free(ns); } -void test_flow_dissector(void) +void test_flow_dissector_skb(void) { - int i, err, prog_fd, keys_fd = -1, tap_fd; + char test_name[TEST_NAME_MAX_LEN]; struct bpf_flow *skel; + int i, err, prog_fd; skel = bpf_flow__open_and_load(); - if (CHECK(!skel, "skel", "failed to open/load skeleton\n")) + if (!ASSERT_OK_PTR(skel, "open/load skeleton")) return; - prog_fd = bpf_program__fd(skel->progs._dissect); - if (CHECK(prog_fd < 0, "bpf_program__fd", "err %d\n", prog_fd)) - goto out_destroy_skel; - keys_fd = bpf_map__fd(skel->maps.last_dissection); - if (CHECK(keys_fd < 0, "bpf_map__fd", "err %d\n", keys_fd)) - goto out_destroy_skel; err = init_prog_array(skel->obj, skel->maps.jmp_table); - if (CHECK(err, "init_prog_array", "err %d\n", err)) + if (!ASSERT_OK(err, "init_prog_array")) + goto out_destroy_skel; + + prog_fd = bpf_program__fd(skel->progs._dissect); + if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd")) goto out_destroy_skel; for (i = 0; i < ARRAY_SIZE(tests); i++) { @@ -641,6 +824,10 @@ void test_flow_dissector(void) ); static struct bpf_flow_keys ctx = {}; + snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name); + if (!test__start_subtest(test_name)) + continue; + if (tests[i].flags) { topts.ctx_in = &ctx; topts.ctx_size_in = sizeof(ctx); @@ -656,26 +843,12 @@ void test_flow_dissector(void) continue; ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), "test_run data_size_out"); - CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); + ASSERT_MEMEQ(&flow_keys, &tests[i].keys, + sizeof(struct bpf_flow_keys), + "returned flow keys"); } - /* Do the same tests but for skb-less flow dissector. - * We use a known path in the net/tun driver that calls - * eth_get_headlen and we manually export bpf_flow_keys - * via BPF map in this case. 
- */ - - tap_fd = create_tap("tap0"); - CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); - err = ifup("tap0"); - CHECK(err, "ifup", "err %d errno %d\n", err, errno); - - /* Test direct prog attachment */ - test_skb_less_prog_attach(skel, tap_fd); - /* Test indirect prog attachment via link */ - test_skb_less_link_create(skel, tap_fd); - - close(tap_fd); out_destroy_skel: bpf_flow__destroy(skel); } + diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c new file mode 100644 index 000000000000..80b153d3ddec --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_classification.c @@ -0,0 +1,797 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define _GNU_SOURCE +#include <stdbool.h> +#include <stdlib.h> +#include <stdio.h> +#include <bpf/bpf.h> +#include <linux/bpf.h> +#include <bpf/libbpf.h> +#include <arpa/inet.h> +#include <asm/byteorder.h> +#include <netinet/udp.h> +#include <poll.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/socket.h> +#include <sys/time.h> +#include <unistd.h> +#include "test_progs.h" +#include "network_helpers.h" +#include "bpf_util.h" +#include "bpf_flow.skel.h" + +#define CFG_PORT_INNER 8000 +#define CFG_PORT_GUE 6080 +#define SUBTEST_NAME_MAX_LEN 32 +#define TEST_NAME_MAX_LEN (32 + SUBTEST_NAME_MAX_LEN) +#define MAX_SOURCE_PORTS 3 +#define TEST_PACKETS_COUNT 10 +#define TEST_PACKET_LEN 100 +#define TEST_PACKET_PATTERN 'a' +#define TEST_IPV4 "192.168.0.1/32" +#define TEST_IPV6 "100::a/128" +#define TEST_TUNNEL_REMOTE "127.0.0.2" +#define TEST_TUNNEL_LOCAL "127.0.0.1" + +#define INIT_ADDR4(addr4, port) \ + { \ + .sin_family = AF_INET, \ + .sin_port = __constant_htons(port), \ + .sin_addr.s_addr = __constant_htonl(addr4), \ + } + +#define INIT_ADDR6(addr6, port) \ + { \ + .sin6_family = AF_INET6, \ + .sin6_port = __constant_htons(port), \ + .sin6_addr = addr6, \ + } +#define TEST_IN4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 2, 0) +#define TEST_IN4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, CFG_PORT_INNER) +#define TEST_OUT4_SRC_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK + 1, 0) +#define TEST_OUT4_DST_ADDR_DEFAULT INIT_ADDR4(INADDR_LOOPBACK, 0) + +#define TEST_IN6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) +#define TEST_IN6_DST_ADDR_DEFAULT \ + INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER) +#define TEST_OUT6_SRC_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) +#define TEST_OUT6_DST_ADDR_DEFAULT INIT_ADDR6(IN6ADDR_LOOPBACK_INIT, 0) + +#define TEST_IN4_SRC_ADDR_DISSECT_CONTINUE INIT_ADDR4(INADDR_LOOPBACK + 126, 0) +#define TEST_IN4_SRC_ADDR_IPIP INIT_ADDR4((in_addr_t)0x01010101, 0) +#define TEST_IN4_DST_ADDR_IPIP INIT_ADDR4((in_addr_t)0xC0A80001, CFG_PORT_INNER) + +struct grehdr { + uint16_t unused; + uint16_t protocol; +} __packed; + +struct guehdr { + union { + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 hlen : 5, control : 1, version : 2; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 version : 2, control : 1, hlen : 5; +#else +#error "Please fix <asm/byteorder.h>" +#endif + __u8 proto_ctype; + __be16 flags; + }; + __be32 word; + }; +}; + +static char buf[ETH_DATA_LEN]; + +struct test_configuration { + char name[SUBTEST_NAME_MAX_LEN]; + int (*test_setup)(void); + void (*test_teardown)(void); + int source_ports[MAX_SOURCE_PORTS]; + int cfg_l3_inner; + struct sockaddr_in in_saddr4; + struct sockaddr_in in_daddr4; + struct sockaddr_in6 in_saddr6; + struct sockaddr_in6 in_daddr6; + int 
cfg_l3_outer; + struct sockaddr_in out_saddr4; + struct sockaddr_in out_daddr4; + struct sockaddr_in6 out_saddr6; + struct sockaddr_in6 out_daddr6; + int cfg_encap_proto; + uint8_t cfg_dsfield_inner; + uint8_t cfg_dsfield_outer; + int cfg_l3_extra; + struct sockaddr_in extra_saddr4; + struct sockaddr_in extra_daddr4; + struct sockaddr_in6 extra_saddr6; + struct sockaddr_in6 extra_daddr6; +}; + +static unsigned long util_gettime(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); +} + +static void build_ipv4_header(void *header, uint8_t proto, uint32_t src, + uint32_t dst, int payload_len, uint8_t tos) +{ + struct iphdr *iph = header; + + iph->ihl = 5; + iph->version = 4; + iph->tos = tos; + iph->ttl = 8; + iph->tot_len = htons(sizeof(*iph) + payload_len); + iph->id = htons(1337); + iph->protocol = proto; + iph->saddr = src; + iph->daddr = dst; + iph->check = build_ip_csum((void *)iph); +} + +static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield) +{ + uint16_t val, *ptr = (uint16_t *)ip6h; + + val = ntohs(*ptr); + val &= 0xF00F; + val |= ((uint16_t)dsfield) << 4; + *ptr = htons(val); +} + +static void build_ipv6_header(void *header, uint8_t proto, + const struct sockaddr_in6 *src, + const struct sockaddr_in6 *dst, int payload_len, + uint8_t dsfield) +{ + struct ipv6hdr *ip6h = header; + + ip6h->version = 6; + ip6h->payload_len = htons(payload_len); + ip6h->nexthdr = proto; + ip6h->hop_limit = 8; + ipv6_set_dsfield(ip6h, dsfield); + + memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr)); + memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr)); +} + +static void build_udp_header(void *header, int payload_len, uint16_t sport, + uint16_t dport, int family) +{ + struct udphdr *udph = header; + int len = sizeof(*udph) + payload_len; + + udph->source = htons(sport); + udph->dest = htons(dport); + udph->len = htons(len); + udph->check = 0; + if (family == AF_INET) + udph->check = build_udp_v4_csum(header - sizeof(struct iphdr), + udph); + else + udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr), + udph); +} + +static void build_gue_header(void *header, uint8_t proto) +{ + struct guehdr *gueh = header; + + gueh->proto_ctype = proto; +} + +static void build_gre_header(void *header, uint16_t proto) +{ + struct grehdr *greh = header; + + greh->protocol = htons(proto); +} + +static int l3_length(int family) +{ + if (family == AF_INET) + return sizeof(struct iphdr); + else + return sizeof(struct ipv6hdr); +} + +static int build_packet(const struct test_configuration *test, uint16_t sport) +{ + int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0; + int el3_len = 0, packet_len; + + memset(buf, 0, ETH_DATA_LEN); + + if (test->cfg_l3_extra) + el3_len = l3_length(test->cfg_l3_extra); + + /* calculate header offsets */ + if (test->cfg_encap_proto) { + ol3_len = l3_length(test->cfg_l3_outer); + + if (test->cfg_encap_proto == IPPROTO_GRE) + ol4_len = sizeof(struct grehdr); + else if (test->cfg_encap_proto == IPPROTO_UDP) + ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr); + } + + il3_len = l3_length(test->cfg_l3_inner); + il4_len = sizeof(struct udphdr); + + packet_len = el3_len + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN; + if (!ASSERT_LE(packet_len, sizeof(buf), "check packet size")) + return -1; + + /* + * Fill packet from inside out, to calculate correct checksums. + * But create ip before udp headers, as udp uses ip for pseudo-sum. 
+ */ + memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len, + TEST_PACKET_PATTERN, TEST_PACKET_LEN); + + /* add zero byte for udp csum padding */ + buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + TEST_PACKET_LEN] = + 0; + + switch (test->cfg_l3_inner) { + case PF_INET: + build_ipv4_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, test->in_saddr4.sin_addr.s_addr, + test->in_daddr4.sin_addr.s_addr, + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_inner); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len + ol3_len + ol4_len, + IPPROTO_UDP, &test->in_saddr6, + &test->in_daddr6, il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_inner); + break; + } + + build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len, + TEST_PACKET_LEN, sport, CFG_PORT_INNER, + test->cfg_l3_inner); + + if (!test->cfg_encap_proto) + return il3_len + il4_len + TEST_PACKET_LEN; + + switch (test->cfg_l3_outer) { + case PF_INET: + build_ipv4_header(buf + el3_len, test->cfg_encap_proto, + test->out_saddr4.sin_addr.s_addr, + test->out_daddr4.sin_addr.s_addr, + ol4_len + il3_len + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_outer); + break; + case PF_INET6: + build_ipv6_header(buf + el3_len, test->cfg_encap_proto, + &test->out_saddr6, &test->out_daddr6, + ol4_len + il3_len + il4_len + TEST_PACKET_LEN, + test->cfg_dsfield_outer); + break; + } + + switch (test->cfg_encap_proto) { + case IPPROTO_UDP: + build_gue_header(buf + el3_len + ol3_len + ol4_len - + sizeof(struct guehdr), + test->cfg_l3_inner == PF_INET ? IPPROTO_IPIP : + IPPROTO_IPV6); + build_udp_header(buf + el3_len + ol3_len, + sizeof(struct guehdr) + il3_len + il4_len + + TEST_PACKET_LEN, + sport, CFG_PORT_GUE, test->cfg_l3_outer); + break; + case IPPROTO_GRE: + build_gre_header(buf + el3_len + ol3_len, + test->cfg_l3_inner == PF_INET ? ETH_P_IP : + ETH_P_IPV6); + break; + } + + switch (test->cfg_l3_extra) { + case PF_INET: + build_ipv4_header(buf, + test->cfg_l3_outer == PF_INET ? IPPROTO_IPIP : + IPPROTO_IPV6, + test->extra_saddr4.sin_addr.s_addr, + test->extra_daddr4.sin_addr.s_addr, + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN, + 0); + break; + case PF_INET6: + build_ipv6_header(buf, + test->cfg_l3_outer == PF_INET ? 
IPPROTO_IPIP : + IPPROTO_IPV6, + &test->extra_saddr6, &test->extra_daddr6, + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN, + 0); + break; + } + + return el3_len + ol3_len + ol4_len + il3_len + il4_len + + TEST_PACKET_LEN; +} + +/* sender transmits encapsulated over RAW or unencap'd over UDP */ +static int setup_tx(const struct test_configuration *test) +{ + int family, fd, ret; + + if (test->cfg_l3_extra) + family = test->cfg_l3_extra; + else if (test->cfg_l3_outer) + family = test->cfg_l3_outer; + else + family = test->cfg_l3_inner; + + fd = socket(family, SOCK_RAW, IPPROTO_RAW); + if (!ASSERT_OK_FD(fd, "setup tx socket")) + return fd; + + if (test->cfg_l3_extra) { + if (test->cfg_l3_extra == PF_INET) + ret = connect(fd, (void *)&test->extra_daddr4, + sizeof(test->extra_daddr4)); + else + ret = connect(fd, (void *)&test->extra_daddr6, + sizeof(test->extra_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } else if (test->cfg_l3_outer) { + /* connect to destination if not encapsulated */ + if (test->cfg_l3_outer == PF_INET) + ret = connect(fd, (void *)&test->out_daddr4, + sizeof(test->out_daddr4)); + else + ret = connect(fd, (void *)&test->out_daddr6, + sizeof(test->out_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } else { + /* otherwise using loopback */ + if (test->cfg_l3_inner == PF_INET) + ret = connect(fd, (void *)&test->in_daddr4, + sizeof(test->in_daddr4)); + else + ret = connect(fd, (void *)&test->in_daddr6, + sizeof(test->in_daddr6)); + if (!ASSERT_OK(ret, "connect")) { + close(fd); + return ret; + } + } + + return fd; +} + +/* receiver reads unencapsulated UDP */ +static int setup_rx(const struct test_configuration *test) +{ + int fd, ret; + + fd = socket(test->cfg_l3_inner, SOCK_DGRAM, 0); + if (!ASSERT_OK_FD(fd, "socket rx")) + return fd; + + if (test->cfg_l3_inner == PF_INET) + ret = bind(fd, (void *)&test->in_daddr4, + sizeof(test->in_daddr4)); + else + ret = bind(fd, (void *)&test->in_daddr6, + sizeof(test->in_daddr6)); + if (!ASSERT_OK(ret, "bind rx")) { + close(fd); + return ret; + } + + return fd; +} + +static int do_tx(int fd, const char *pkt, int len) +{ + int ret; + + ret = write(fd, pkt, len); + return ret != len; +} + +static int do_poll(int fd, short events, int timeout) +{ + struct pollfd pfd; + int ret; + + pfd.fd = fd; + pfd.events = events; + + ret = poll(&pfd, 1, timeout); + return ret; +} + +static int do_rx(int fd) +{ + char rbuf; + int ret, num = 0; + + while (1) { + ret = recv(fd, &rbuf, 1, MSG_DONTWAIT); + if (ret == -1 && errno == EAGAIN) + break; + if (ret < 0) + return -1; + if (!ASSERT_EQ(rbuf, TEST_PACKET_PATTERN, "check pkt pattern")) + return -1; + num++; + } + + return num; +} + +static int run_test(const struct test_configuration *test, + int source_port_index) +{ + int fdt = -1, fdr = -1, len, tx = 0, rx = 0, err; + unsigned long tstop, tcur; + + fdr = setup_rx(test); + fdt = setup_tx(test); + if (!ASSERT_OK_FD(fdr, "setup rx") || !ASSERT_OK_FD(fdt, "setup tx")) { + err = -1; + goto out_close_sockets; + } + + len = build_packet(test, + (uint16_t)test->source_ports[source_port_index]); + if (!ASSERT_GT(len, 0, "build test packet")) + return -1; + + tcur = util_gettime(); + tstop = tcur; + + while (tx < TEST_PACKETS_COUNT) { + if (!ASSERT_OK(do_tx(fdt, buf, len), "do_tx")) + break; + tx++; + err = do_rx(fdr); + if (!ASSERT_GE(err, 0, "do_rx")) + break; + rx += err; + } + + /* read straggler packets, if any */ + if (rx < tx) { + tstop = util_gettime() + 100; + while (rx < 
tx) { + tcur = util_gettime(); + if (tcur >= tstop) + break; + + err = do_poll(fdr, POLLIN, tstop - tcur); + if (err < 0) + break; + err = do_rx(fdr); + if (err >= 0) + rx += err; + } + } + +out_close_sockets: + close(fdt); + close(fdr); + return rx; +} + +static int attach_and_configure_program(struct bpf_flow *skel) +{ + struct bpf_map *prog_array = skel->maps.jmp_table; + int main_prog_fd, sub_prog_fd, map_fd, i, err; + struct bpf_program *prog; + char prog_name[32]; + + main_prog_fd = bpf_program__fd(skel->progs._dissect); + if (main_prog_fd < 0) + return main_prog_fd; + + err = bpf_prog_attach(main_prog_fd, 0, BPF_FLOW_DISSECTOR, 0); + if (err) + return err; + + map_fd = bpf_map__fd(prog_array); + if (map_fd < 0) + return map_fd; + + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!prog) + return -1; + + sub_prog_fd = bpf_program__fd(prog); + if (sub_prog_fd < 0) + return -1; + + err = bpf_map_update_elem(map_fd, &i, &sub_prog_fd, BPF_ANY); + if (err) + return -1; + } + + return main_prog_fd; +} + +static void detach_program(struct bpf_flow *skel, int prog_fd) +{ + bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR); +} + +static int set_port_drop(int pf, bool multi_port) +{ + char dst_port[16]; + + snprintf(dst_port, sizeof(dst_port), "%d", CFG_PORT_INNER); + + SYS(fail, "tc qdisc add dev lo ingress"); + SYS(fail_delete_qdisc, "tc filter add %s %s %s %s %s %s %s %s %s %s %s %s", + "dev lo", + "parent FFFF:", + "protocol", pf == PF_INET6 ? "ipv6" : "ip", + "pref 1337", + "flower", + "ip_proto udp", + "src_port", multi_port ? "8-10" : "9", + "dst_port", dst_port, + "action drop"); + return 0; + +fail_delete_qdisc: + SYS_NOFAIL("tc qdisc del dev lo ingress"); +fail: + return 1; +} + +static void remove_filter(void) +{ + SYS_NOFAIL("tc filter del dev lo ingress"); + SYS_NOFAIL("tc qdisc del dev lo ingress"); +} + +static int ipv4_setup(void) +{ + return set_port_drop(PF_INET, false); +} + +static int ipv6_setup(void) +{ + return set_port_drop(PF_INET6, false); +} + +static int port_range_setup(void) +{ + return set_port_drop(PF_INET, true); +} + +static int set_addresses(void) +{ + SYS(out, "ip -4 addr add %s dev lo", TEST_IPV4); + SYS(out_remove_ipv4, "ip -6 addr add %s dev lo", TEST_IPV6); + return 0; +out_remove_ipv4: + SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4); +out: + return -1; +} + +static void unset_addresses(void) +{ + SYS_NOFAIL("ip -4 addr del %s dev lo", TEST_IPV4); + SYS_NOFAIL("ip -6 addr del %s dev lo", TEST_IPV6); +} + +static int ipip_setup(void) +{ + if (!ASSERT_OK(set_addresses(), "configure addresses")) + return -1; + if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter")) + goto out_unset_addresses; + SYS(out_remove_filter, + "ip link add ipip_test type ipip remote %s local %s dev lo", + TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL); + SYS(out_clean_netif, "ip link set ipip_test up"); + return 0; + +out_clean_netif: + SYS_NOFAIL("ip link del ipip_test"); +out_remove_filter: + remove_filter(); +out_unset_addresses: + unset_addresses(); + return -1; +} + +static void ipip_shutdown(void) +{ + SYS_NOFAIL("ip link del ipip_test"); + remove_filter(); + unset_addresses(); +} + +static int gre_setup(void) +{ + if (!ASSERT_OK(set_addresses(), "configure addresses")) + return -1; + if (!ASSERT_OK(set_port_drop(PF_INET, false), "set filter")) + goto out_unset_addresses; + SYS(out_remove_filter, + "ip link add gre_test type gre 
remote %s local %s dev lo", + TEST_TUNNEL_REMOTE, TEST_TUNNEL_LOCAL); + SYS(out_clean_netif, "ip link set gre_test up"); + return 0; + +out_clean_netif: + SYS_NOFAIL("ip link del ipip_test"); +out_remove_filter: + remove_filter(); +out_unset_addresses: + unset_addresses(); + return -1; +} + +static void gre_shutdown(void) +{ + SYS_NOFAIL("ip link del gre_test"); + remove_filter(); + unset_addresses(); +} + +static const struct test_configuration tests_input[] = { + { + .name = "ipv4", + .test_setup = ipv4_setup, + .test_teardown = remove_filter, + .source_ports = { 8, 9, 10 }, + .cfg_l3_inner = PF_INET, + .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT, + .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT + }, + { + .name = "ipv4_continue_dissect", + .test_setup = ipv4_setup, + .test_teardown = remove_filter, + .source_ports = { 8, 9, 10 }, + .cfg_l3_inner = PF_INET, + .in_saddr4 = TEST_IN4_SRC_ADDR_DISSECT_CONTINUE, + .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT }, + { + .name = "ipip", + .test_setup = ipip_setup, + .test_teardown = ipip_shutdown, + .source_ports = { 8, 9, 10 }, + .cfg_l3_inner = PF_INET, + .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP, + .in_daddr4 = TEST_IN4_DST_ADDR_IPIP, + .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT, + .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT, + .cfg_l3_outer = PF_INET, + .cfg_encap_proto = IPPROTO_IPIP, + + }, + { + .name = "gre", + .test_setup = gre_setup, + .test_teardown = gre_shutdown, + .source_ports = { 8, 9, 10 }, + .cfg_l3_inner = PF_INET, + .in_saddr4 = TEST_IN4_SRC_ADDR_IPIP, + .in_daddr4 = TEST_IN4_DST_ADDR_IPIP, + .out_saddr4 = TEST_OUT4_SRC_ADDR_DEFAULT, + .out_daddr4 = TEST_OUT4_DST_ADDR_DEFAULT, + .cfg_l3_outer = PF_INET, + .cfg_encap_proto = IPPROTO_GRE, + }, + { + .name = "port_range", + .test_setup = port_range_setup, + .test_teardown = remove_filter, + .source_ports = { 7, 9, 11 }, + .cfg_l3_inner = PF_INET, + .in_saddr4 = TEST_IN4_SRC_ADDR_DEFAULT, + .in_daddr4 = TEST_IN4_DST_ADDR_DEFAULT }, + { + .name = "ipv6", + .test_setup = ipv6_setup, + .test_teardown = remove_filter, + .source_ports = { 8, 9, 10 }, + .cfg_l3_inner = PF_INET6, + .in_saddr6 = TEST_IN6_SRC_ADDR_DEFAULT, + .in_daddr6 = TEST_IN6_DST_ADDR_DEFAULT + }, +}; + +struct test_ctx { + struct bpf_flow *skel; + struct netns_obj *ns; + int prog_fd; +}; + +static int test_global_init(struct test_ctx *ctx) +{ + int err; + + ctx->skel = bpf_flow__open_and_load(); + if (!ASSERT_OK_PTR(ctx->skel, "open and load flow_dissector")) + return -1; + + ctx->ns = netns_new("flow_dissector_classification", true); + if (!ASSERT_OK_PTR(ctx->ns, "switch ns")) + goto out_destroy_skel; + + err = write_sysctl("/proc/sys/net/ipv4/conf/default/rp_filter", "0"); + err |= write_sysctl("/proc/sys/net/ipv4/conf/all/rp_filter", "0"); + err |= write_sysctl("/proc/sys/net/ipv4/conf/lo/rp_filter", "0"); + if (!ASSERT_OK(err, "configure net tunables")) + goto out_clean_ns; + + ctx->prog_fd = attach_and_configure_program(ctx->skel); + if (!ASSERT_OK_FD(ctx->prog_fd, "attach and configure program")) + goto out_clean_ns; + return 0; +out_clean_ns: + netns_free(ctx->ns); +out_destroy_skel: + bpf_flow__destroy(ctx->skel); + return -1; +} + +static void test_global_shutdown(struct test_ctx *ctx) +{ + detach_program(ctx->skel, ctx->prog_fd); + netns_free(ctx->ns); + bpf_flow__destroy(ctx->skel); +} + +void test_flow_dissector_classification(void) +{ + struct test_ctx ctx; + const struct test_configuration *test; + int i; + + if (test_global_init(&ctx)) + return; + + for (i = 0; i < ARRAY_SIZE(tests_input); i++) { + if 
(!test__start_subtest(tests_input[i].name)) + continue; + test = &tests_input[i]; + /* All tests are expected to have one rx-ok port first, + * then a non-working rx port, and finally a rx-ok port + */ + if (test->test_setup && + !ASSERT_OK(test->test_setup(), "init filter")) + continue; + + ASSERT_EQ(run_test(test, 0), TEST_PACKETS_COUNT, + "test first port"); + ASSERT_EQ(run_test(test, 1), 0, "test second port"); + ASSERT_EQ(run_test(test, 2), TEST_PACKETS_COUNT, + "test third port"); + if (test->test_teardown) + test->test_teardown(); + } + test_global_shutdown(&ctx); +} diff --git a/tools/testing/selftests/bpf/prog_tests/free_timer.c b/tools/testing/selftests/bpf/prog_tests/free_timer.c new file mode 100644 index 000000000000..b7b77a6b2979 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/free_timer.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include <unistd.h> +#include <sys/syscall.h> +#include <test_progs.h> + +#include "free_timer.skel.h" + +struct run_ctx { + struct bpf_program *start_prog; + struct bpf_program *overwrite_prog; + pthread_barrier_t notify; + int loop; + bool start; + bool stop; +}; + +static void start_threads(struct run_ctx *ctx) +{ + ctx->start = true; +} + +static void stop_threads(struct run_ctx *ctx) +{ + ctx->stop = true; + /* Guarantee the order between ->stop and ->start */ + __atomic_store_n(&ctx->start, true, __ATOMIC_RELEASE); +} + +static int wait_for_start(struct run_ctx *ctx) +{ + while (!__atomic_load_n(&ctx->start, __ATOMIC_ACQUIRE)) + usleep(10); + + return ctx->stop; +} + +static void *overwrite_timer_fn(void *arg) +{ + struct run_ctx *ctx = arg; + int loop, fd, err; + cpu_set_t cpuset; + long ret = 0; + + /* Pin on CPU 0 */ + CPU_ZERO(&cpuset); + CPU_SET(0, &cpuset); + pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); + + /* Is the thread being stopped ? */ + err = wait_for_start(ctx); + if (err) + return NULL; + + fd = bpf_program__fd(ctx->overwrite_prog); + loop = ctx->loop; + while (loop-- > 0) { + LIBBPF_OPTS(bpf_test_run_opts, opts); + + /* Wait for start thread to complete */ + pthread_barrier_wait(&ctx->notify); + + /* Overwrite timers */ + err = bpf_prog_test_run_opts(fd, &opts); + if (err) + ret |= 1; + else if (opts.retval) + ret |= 2; + + /* Notify start thread to start timers */ + pthread_barrier_wait(&ctx->notify); + } + + return (void *)ret; +} + +static void *start_timer_fn(void *arg) +{ + struct run_ctx *ctx = arg; + int loop, fd, err; + cpu_set_t cpuset; + long ret = 0; + + /* Pin on CPU 1 */ + CPU_ZERO(&cpuset); + CPU_SET(1, &cpuset); + pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); + + /* Is the thread being stopped ? 
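+	 * (wait_for_start() spins until ctx->start is set and then returns
+	 * ctx->stop, so a non-zero value here means stop_threads() already
+	 * ran and this worker should exit early.)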
*/ + err = wait_for_start(ctx); + if (err) + return NULL; + + fd = bpf_program__fd(ctx->start_prog); + loop = ctx->loop; + while (loop-- > 0) { + LIBBPF_OPTS(bpf_test_run_opts, opts); + + /* Run the prog to start timer */ + err = bpf_prog_test_run_opts(fd, &opts); + if (err) + ret |= 4; + else if (opts.retval) + ret |= 8; + + /* Notify overwrite thread to do overwrite */ + pthread_barrier_wait(&ctx->notify); + + /* Wait for overwrite thread to complete */ + pthread_barrier_wait(&ctx->notify); + } + + return (void *)ret; +} + +void test_free_timer(void) +{ + struct free_timer *skel; + struct bpf_program *prog; + struct run_ctx ctx; + pthread_t tid[2]; + void *ret; + int err; + + skel = free_timer__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_load")) + return; + + memset(&ctx, 0, sizeof(ctx)); + + prog = bpf_object__find_program_by_name(skel->obj, "start_timer"); + if (!ASSERT_OK_PTR(prog, "find start prog")) + goto out; + ctx.start_prog = prog; + + prog = bpf_object__find_program_by_name(skel->obj, "overwrite_timer"); + if (!ASSERT_OK_PTR(prog, "find overwrite prog")) + goto out; + ctx.overwrite_prog = prog; + + pthread_barrier_init(&ctx.notify, NULL, 2); + ctx.loop = 10; + + err = pthread_create(&tid[0], NULL, start_timer_fn, &ctx); + if (!ASSERT_OK(err, "create start_timer")) + goto out; + + err = pthread_create(&tid[1], NULL, overwrite_timer_fn, &ctx); + if (!ASSERT_OK(err, "create overwrite_timer")) { + stop_threads(&ctx); + goto out; + } + + start_threads(&ctx); + + ret = NULL; + err = pthread_join(tid[0], &ret); + ASSERT_EQ(err | (long)ret, 0, "start_timer"); + ret = NULL; + err = pthread_join(tid[1], &ret); + ASSERT_EQ(err | (long)ret, 0, "overwrite_timer"); +out: + free_timer__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 66ab1cae923e..e19ef509ebf8 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -397,6 +397,31 @@ cleanup: kprobe_multi_session_cookie__destroy(skel); } +static void test_unique_match(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi *skel = NULL; + struct bpf_link *link = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) + return; + + opts.unique_match = true; + skel->bss->pid = getpid(); + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "bpf_fentry_test*", &opts); + if (!ASSERT_ERR_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + bpf_link__destroy(link); + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "bpf_fentry_test8*", &opts); + if (ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts")) + bpf_link__destroy(link); + + kprobe_multi__destroy(skel); +} + static size_t symbol_hash(long key, void *ctx __maybe_unused) { return str_hash((const char *) key); @@ -765,5 +790,7 @@ void test_kprobe_multi_test(void) test_session_skel_api(); if (test__start_subtest("session_cookie")) test_session_cookie_skel_api(); + if (test__start_subtest("unique_match")) + test_unique_match(); RUN_TESTS(kprobe_multi_verifier); } diff --git a/tools/testing/selftests/bpf/prog_tests/missed.c b/tools/testing/selftests/bpf/prog_tests/missed.c index 70d90c43537c..ed8857ae914a 100644 --- a/tools/testing/selftests/bpf/prog_tests/missed.c +++ b/tools/testing/selftests/bpf/prog_tests/missed.c @@ -85,6 +85,7 @@ static void 
test_missed_kprobe_recursion(void) ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses"); ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses"); ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses"); + ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test6)), 1, "test6_recursion_misses"); cleanup: missed_kprobe_recursion__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 884ad87783d5..1e3e4392dcca 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -111,31 +111,35 @@ out: static void test_sockmap_vsock_delete_on_close(void) { - int err, c, p, map; - const int zero = 0; - - err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); - if (!ASSERT_OK(err, "create_pair(AF_VSOCK)")) - return; + int map, c, p, err, zero = 0; map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), sizeof(int), 1, NULL); - if (!ASSERT_GE(map, 0, "bpf_map_create")) { - close(c); - goto out; - } + if (!ASSERT_OK_FD(map, "bpf_map_create")) + return; - err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST); - close(c); - if (!ASSERT_OK(err, "bpf_map_update")) - goto out; + err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); + if (!ASSERT_OK(err, "create_pair")) + goto close_map; + + if (xbpf_map_update_elem(map, &zero, &c, BPF_NOEXIST)) + goto close_socks; + + xclose(c); + xclose(p); + + err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); + if (!ASSERT_OK(err, "create_pair")) + goto close_map; - err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST); ASSERT_OK(err, "after close(), bpf_map_update"); -out: - close(p); - close(map); +close_socks: + xclose(c); + xclose(p); +close_map: + xclose(map); } static void test_skmsg_helpers(enum bpf_map_type map_type) @@ -522,8 +526,8 @@ static void test_sockmap_skb_verdict_shutdown(void) if (!ASSERT_EQ(err, 1, "epoll_wait(fd)")) goto out_close; - n = recv(c1, &b, 1, SOCK_NONBLOCK); - ASSERT_EQ(n, 0, "recv_timeout(fin)"); + n = recv(c1, &b, 1, MSG_DONTWAIT); + ASSERT_EQ(n, 0, "recv(fin)"); out_close: close(c1); close(p1); @@ -531,57 +535,6 @@ out: test_sockmap_pass_prog__destroy(skel); } -static void test_sockmap_stream_pass(void) -{ - int zero = 0, sent, recvd; - int verdict, parser; - int err, map; - int c = -1, p = -1; - struct test_sockmap_pass_prog *pass = NULL; - char snd[256] = "0123456789"; - char rcv[256] = "0"; - - pass = test_sockmap_pass_prog__open_and_load(); - verdict = bpf_program__fd(pass->progs.prog_skb_verdict); - parser = bpf_program__fd(pass->progs.prog_skb_parser); - map = bpf_map__fd(pass->maps.sock_map_rx); - - err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0); - if (!ASSERT_OK(err, "bpf_prog_attach stream parser")) - goto out; - - err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); - if (!ASSERT_OK(err, "bpf_prog_attach stream verdict")) - goto out; - - err = create_pair(AF_INET, SOCK_STREAM, &c, &p); - if (err) - goto out; - - /* sk_data_ready of 'p' will be replaced by strparser handler */ - err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); - if (!ASSERT_OK(err, "bpf_map_update_elem(p)")) - goto out_close; - - /* - * as 'prog_skb_parser' return the original skb len and - * 'prog_skb_verdict' return SK_PASS, the kernel will just - * pass it through to original socket 'p' - */ - 
sent = xsend(c, snd, sizeof(snd), 0); - ASSERT_EQ(sent, sizeof(snd), "xsend(c)"); - - recvd = recv_timeout(p, rcv, sizeof(rcv), SOCK_NONBLOCK, - IO_TIMEOUT_SEC); - ASSERT_EQ(recvd, sizeof(rcv), "recv_timeout(p)"); - -out_close: - close(c); - close(p); - -out: - test_sockmap_pass_prog__destroy(pass); -} static void test_sockmap_skb_verdict_fionread(bool pass_prog) { @@ -628,7 +581,7 @@ static void test_sockmap_skb_verdict_fionread(bool pass_prog) ASSERT_EQ(avail, expected, "ioctl(FIONREAD)"); /* On DROP test there will be no data to read */ if (pass_prog) { - recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC); + recvd = recv_timeout(c1, &buf, sizeof(buf), MSG_DONTWAIT, IO_TIMEOUT_SEC); ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)"); } @@ -1061,6 +1014,34 @@ destroy: test_sockmap_pass_prog__destroy(skel); } +static void test_sockmap_vsock_unconnected(void) +{ + struct sockaddr_storage addr; + int map, s, zero = 0; + socklen_t alen; + + map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), + sizeof(int), 1, NULL); + if (!ASSERT_OK_FD(map, "bpf_map_create")) + return; + + s = xsocket(AF_VSOCK, SOCK_STREAM, 0); + if (s < 0) + goto close_map; + + /* Fail connect(), but trigger transport assignment. */ + init_addr_loopback(AF_VSOCK, &addr, &alen); + if (!ASSERT_ERR(connect(s, sockaddr(&addr), alen), "connect")) + goto close_sock; + + ASSERT_ERR(bpf_map_update_elem(map, &zero, &s, BPF_ANY), "map_update"); + +close_sock: + xclose(s); +close_map: + xclose(map); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -1101,8 +1082,6 @@ void test_sockmap_basic(void) test_sockmap_progs_query(BPF_SK_SKB_VERDICT); if (test__start_subtest("sockmap skb_verdict shutdown")) test_sockmap_skb_verdict_shutdown(); - if (test__start_subtest("sockmap stream parser and verdict pass")) - test_sockmap_stream_pass(); if (test__start_subtest("sockmap skb_verdict fionread")) test_sockmap_skb_verdict_fionread(true); if (test__start_subtest("sockmap skb_verdict fionread on drop")) @@ -1127,4 +1106,6 @@ void test_sockmap_basic(void) test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH); if (test__start_subtest("sockmap skb_verdict vsock poll")) test_sockmap_skb_verdict_vsock_poll(); + if (test__start_subtest("sockmap vsock unconnected")) + test_sockmap_vsock_unconnected(); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c new file mode 100644 index 000000000000..621b3b71888e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_strp.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <error.h> +#include <netinet/tcp.h> +#include <test_progs.h> +#include "sockmap_helpers.h" +#include "test_skmsg_load_helpers.skel.h" +#include "test_sockmap_strp.skel.h" + +#define STRP_PKT_HEAD_LEN 4 +#define STRP_PKT_BODY_LEN 6 +#define STRP_PKT_FULL_LEN (STRP_PKT_HEAD_LEN + STRP_PKT_BODY_LEN) + +static const char packet[STRP_PKT_FULL_LEN] = "head+body\0"; +static const int test_packet_num = 100; + +/* Current implementation of tcp_bpf_recvmsg_parser() invokes data_ready + * with sk held if an skb exists in sk_receive_queue. Then for the + * data_ready implementation of strparser, it will delay the read + * operation if sk is held and EAGAIN is returned. 
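+ * sockmap_strp_consume_pre_data() below therefore retries once after a
+ * short sleep when the first recv_timeout() comes back with EAGAIN.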
+ */ +static int sockmap_strp_consume_pre_data(int p) +{ + int recvd; + bool retried = false; + char rcv[10]; + +retry: + errno = 0; + recvd = recv_timeout(p, rcv, sizeof(rcv), 0, 1); + if (recvd < 0 && errno == EAGAIN && retried == false) { + /* On the first call, EAGAIN will certainly be returned. + * A 1-second wait is enough for the workqueue to finish. + */ + sleep(1); + retried = true; + goto retry; + } + + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv error or truncated data") || + !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN), + "data mismatch")) + return -1; + return 0; +} + +static struct test_sockmap_strp *sockmap_strp_init(int *out_map, bool pass, + bool need_parser) +{ + struct test_sockmap_strp *strp = NULL; + int verdict, parser; + int err; + + strp = test_sockmap_strp__open_and_load(); + *out_map = bpf_map__fd(strp->maps.sock_map); + + if (need_parser) + parser = bpf_program__fd(strp->progs.prog_skb_parser_partial); + else + parser = bpf_program__fd(strp->progs.prog_skb_parser); + + if (pass) + verdict = bpf_program__fd(strp->progs.prog_skb_verdict_pass); + else + verdict = bpf_program__fd(strp->progs.prog_skb_verdict); + + err = bpf_prog_attach(parser, *out_map, BPF_SK_SKB_STREAM_PARSER, 0); + if (!ASSERT_OK(err, "bpf_prog_attach stream parser")) + goto err; + + err = bpf_prog_attach(verdict, *out_map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach stream verdict")) + goto err; + + return strp; +err: + test_sockmap_strp__destroy(strp); + return NULL; +} + +/* Dispatch packets to different socket by packet size: + * + * ------ ------ + * | pkt4 || pkt1 |... > remote socket + * ------ ------ / ------ ------ + * | pkt8 | pkt7 |... + * ------ ------ \ ------ ------ + * | pkt3 || pkt2 |... > local socket + * ------ ------ + */ +static void test_sockmap_strp_dispatch_pkt(int family, int sotype) +{ + int i, j, zero = 0, one = 1, recvd; + int err, map; + int c0 = -1, p0 = -1, c1 = -1, p1 = -1; + struct test_sockmap_strp *strp = NULL; + int test_cnt = 6; + char rcv[10]; + struct { + char data[7]; + int data_len; + int send_cnt; + int *receiver; + } send_dir[2] = { + /* data expected to deliver to local */ + {"llllll", 6, 0, &p0}, + /* data expected to deliver to remote */ + {"rrrrr", 5, 0, &c1} + }; + + strp = sockmap_strp_init(&map, false, false); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1); + if (!ASSERT_OK(err, "create_socket_pairs()")) + goto out; + + err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p0)")) + goto out_close; + + err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p1)")) + goto out_close; + + err = setsockopt(c1, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero)); + if (!ASSERT_OK(err, "setsockopt(TCP_NODELAY)")) + goto out_close; + + /* deliver data with data size greater than 5 to local */ + strp->data->verdict_max_size = 5; + + for (i = 0; i < test_cnt; i++) { + int d = i % 2; + + xsend(c0, send_dir[d].data, send_dir[d].data_len, 0); + send_dir[d].send_cnt++; + } + + for (i = 0; i < 2; i++) { + for (j = 0; j < send_dir[i].send_cnt; j++) { + int expected = send_dir[i].data_len; + + recvd = recv_timeout(*send_dir[i].receiver, rcv, + expected, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, expected, "recv_timeout()")) + goto out_close; + if (!ASSERT_OK(memcmp(send_dir[i].data, rcv, recvd), + "data mismatch")) + goto out_close; + } + } +out_close: + 
close(c0); + close(c1); + close(p0); + close(p1); +out: + test_sockmap_strp__destroy(strp); +} + +/* We have multiple packets in one skb + * ------------ ------------ ------------ + * | packet1 | packet2 | ... + * ------------ ------------ ------------ + */ +static void test_sockmap_strp_multiple_pkt(int family, int sotype) +{ + int i, zero = 0; + int sent, recvd, total; + int err, map; + int c = -1, p = -1; + struct test_sockmap_strp *strp = NULL; + char *snd = NULL, *rcv = NULL; + + strp = sockmap_strp_init(&map, true, true); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + err = create_pair(family, sotype, &c, &p); + if (err) + goto out; + + err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)")) + goto out_close; + + /* construct multiple packets in one buffer */ + total = test_packet_num * STRP_PKT_FULL_LEN; + snd = malloc(total); + rcv = malloc(total + 1); + if (!ASSERT_TRUE(snd, "malloc(snd)") || + !ASSERT_TRUE(rcv, "malloc(rcv)")) + goto out_close; + + for (i = 0; i < test_packet_num; i++) { + memcpy(snd + i * STRP_PKT_FULL_LEN, + packet, STRP_PKT_FULL_LEN); + } + + sent = xsend(c, snd, total, 0); + if (!ASSERT_EQ(sent, total, "xsend(c)")) + goto out_close; + + /* try to recv one more byte to avoid truncation check */ + recvd = recv_timeout(p, rcv, total + 1, MSG_DONTWAIT, IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, total, "recv(rcv)")) + goto out_close; + + /* we sent TCP segment with multiple encapsulation + * then check whether packets are handled correctly + */ + if (!ASSERT_OK(memcmp(snd, rcv, total), "data mismatch")) + goto out_close; + +out_close: + close(c); + close(p); + if (snd) + free(snd); + if (rcv) + free(rcv); +out: + test_sockmap_strp__destroy(strp); +} + +/* Test strparser with partial read */ +static void test_sockmap_strp_partial_read(int family, int sotype) +{ + int zero = 0, recvd, off; + int err, map; + int c = -1, p = -1; + struct test_sockmap_strp *strp = NULL; + char rcv[STRP_PKT_FULL_LEN + 1] = "0"; + + strp = sockmap_strp_init(&map, true, true); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + err = create_pair(family, sotype, &c, &p); + if (err) + goto out; + + /* sk_data_ready of 'p' will be replaced by strparser handler */ + err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(zero, p)")) + goto out_close; + + /* 1.1 send partial head, 1 byte header left */ + off = STRP_PKT_HEAD_LEN - 1; + xsend(c, packet, off, 0); + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_EQ(-1, recvd, "partial head sent, expected no data")) + goto out_close; + + /* 1.2 send remaining head and body */ + xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0); + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected full data")) + goto out_close; + + /* 2.1 send partial head, 1 byte header left */ + off = STRP_PKT_HEAD_LEN - 1; + xsend(c, packet, off, 0); + + /* 2.2 send remaining head and partial body, 1 byte body left */ + xsend(c, packet + off, STRP_PKT_FULL_LEN - off - 1, 0); + off = STRP_PKT_FULL_LEN - 1; + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_EQ(-1, recvd, "partial body sent, expected no data")) + goto out_close; + + /* 2.3 send remaining body */ + xsend(c, packet + off, STRP_PKT_FULL_LEN - off, 0); + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "expected 
full data")) + goto out_close; + +out_close: + close(c); + close(p); + +out: + test_sockmap_strp__destroy(strp); +} + +/* Test simple socket read/write with strparser + FIONREAD */ +static void test_sockmap_strp_pass(int family, int sotype, bool fionread) +{ + int zero = 0, pkt_size = STRP_PKT_FULL_LEN, sent, recvd, avail; + int err, map; + int c = -1, p = -1; + int test_cnt = 10, i; + struct test_sockmap_strp *strp = NULL; + char rcv[STRP_PKT_FULL_LEN + 1] = "0"; + + strp = sockmap_strp_init(&map, true, true); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + err = create_pair(family, sotype, &c, &p); + if (err) + goto out; + + /* inject some data before bpf process, it should be read + * correctly because we check sk_receive_queue in + * tcp_bpf_recvmsg_parser(). + */ + sent = xsend(c, packet, pkt_size, 0); + if (!ASSERT_EQ(sent, pkt_size, "xsend(pre-data)")) + goto out_close; + + /* sk_data_ready of 'p' will be replaced by strparser handler */ + err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p)")) + goto out_close; + + /* consume previous data we injected */ + if (sockmap_strp_consume_pre_data(p)) + goto out_close; + + /* Previously, we encountered issues such as deadlocks and + * sequence errors that resulted in the inability to read + * continuously. Therefore, we perform multiple iterations + * of testing here. + */ + for (i = 0; i < test_cnt; i++) { + sent = xsend(c, packet, pkt_size, 0); + if (!ASSERT_EQ(sent, pkt_size, "xsend(c)")) + goto out_close; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, pkt_size, "recv_timeout(p)") || + !ASSERT_OK(memcmp(packet, rcv, pkt_size), + "memcmp, data mismatch")) + goto out_close; + } + + if (fionread) { + sent = xsend(c, packet, pkt_size, 0); + if (!ASSERT_EQ(sent, pkt_size, "second xsend(c)")) + goto out_close; + + err = ioctl(p, FIONREAD, &avail); + if (!ASSERT_OK(err, "ioctl(FIONREAD) error") || + !ASSERT_EQ(avail, pkt_size, "ioctl(FIONREAD)")) + goto out_close; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, pkt_size, "second recv_timeout(p)") || + !ASSERT_OK(memcmp(packet, rcv, pkt_size), + "second memcmp, data mismatch")) + goto out_close; + } + +out_close: + close(c); + close(p); + +out: + test_sockmap_strp__destroy(strp); +} + +/* Test strparser with verdict mode */ +static void test_sockmap_strp_verdict(int family, int sotype) +{ + int zero = 0, one = 1, sent, recvd, off; + int err, map; + int c0 = -1, p0 = -1, c1 = -1, p1 = -1; + struct test_sockmap_strp *strp = NULL; + char rcv[STRP_PKT_FULL_LEN + 1] = "0"; + + strp = sockmap_strp_init(&map, false, true); + if (!ASSERT_TRUE(strp, "sockmap_strp_init")) + return; + + /* We simulate a reverse proxy server. + * When p0 receives data from c0, we forward it to c1. + * From c1's perspective, it will consider this data + * as being sent by p1. 
+ */ + err = create_socket_pairs(family, sotype, &c0, &c1, &p0, &p1); + if (!ASSERT_OK(err, "create_socket_pairs()")) + goto out; + + err = bpf_map_update_elem(map, &zero, &p0, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p0)")) + goto out_close; + + err = bpf_map_update_elem(map, &one, &p1, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p1)")) + goto out_close; + + sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0); + if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "xsend(c0)")) + goto out_close; + + recvd = recv_timeout(c1, rcv, sizeof(rcv), MSG_DONTWAIT, + IO_TIMEOUT_SEC); + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "recv_timeout(c1)") || + !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN), + "received data does not match the sent data")) + goto out_close; + + /* send again to ensure the stream is functioning correctly. */ + sent = xsend(c0, packet, STRP_PKT_FULL_LEN, 0); + if (!ASSERT_EQ(sent, STRP_PKT_FULL_LEN, "second xsend(c0)")) + goto out_close; + + /* partial read */ + off = STRP_PKT_FULL_LEN / 2; + recvd = recv_timeout(c1, rcv, off, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + recvd += recv_timeout(c1, rcv + off, sizeof(rcv) - off, MSG_DONTWAIT, + IO_TIMEOUT_SEC); + + if (!ASSERT_EQ(recvd, STRP_PKT_FULL_LEN, "partial recv_timeout(c1)") || + !ASSERT_OK(memcmp(packet, rcv, STRP_PKT_FULL_LEN), + "partial received data does not match the sent data")) + goto out_close; + +out_close: + close(c0); + close(c1); + close(p0); + close(p1); +out: + test_sockmap_strp__destroy(strp); +} + +void test_sockmap_strp(void) +{ + if (test__start_subtest("sockmap strp tcp pass")) + test_sockmap_strp_pass(AF_INET, SOCK_STREAM, false); + if (test__start_subtest("sockmap strp tcp v6 pass")) + test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, false); + if (test__start_subtest("sockmap strp tcp pass fionread")) + test_sockmap_strp_pass(AF_INET, SOCK_STREAM, true); + if (test__start_subtest("sockmap strp tcp v6 pass fionread")) + test_sockmap_strp_pass(AF_INET6, SOCK_STREAM, true); + if (test__start_subtest("sockmap strp tcp verdict")) + test_sockmap_strp_verdict(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp v6 verdict")) + test_sockmap_strp_verdict(AF_INET6, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp partial read")) + test_sockmap_strp_partial_read(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp multiple packets")) + test_sockmap_strp_multiple_pkt(AF_INET, SOCK_STREAM); + if (test__start_subtest("sockmap strp tcp dispatch")) + test_sockmap_strp_dispatch_pkt(AF_INET, SOCK_STREAM); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 05d0e07da394..ba6b3ec1156a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -2,7 +2,7 @@ #include <test_progs.h> #include "cgroup_helpers.h" -#include <linux/tcp.h> +#include <netinet/tcp.h> #include <linux/netlink.h> #include "sockopt_sk.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c index 151a4210028f..2461d183dee5 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c @@ -14,10 +14,16 @@ #include "netlink_helpers.h" #include "tc_helpers.h" +#define NETKIT_HEADROOM 32 +#define NETKIT_TAILROOM 8 + #define MARK 42 #define PRIO 0xeb9f #define ICMP_ECHO 8 +#define FLAG_ADJUST_ROOM (1 << 0) +#define FLAG_SAME_NETNS (1 << 1) + struct icmphdr 
{ __u8 type; __u8 code; @@ -35,7 +41,7 @@ struct iplink_req { }; static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, - bool same_netns, int scrub, int peer_scrub) + int scrub, int peer_scrub, __u32 flags) { struct rtnl_handle rth = { .fd = -1 }; struct iplink_req req = {}; @@ -63,6 +69,10 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, addattr32(&req.n, sizeof(req), IFLA_NETKIT_SCRUB, scrub); addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_SCRUB, peer_scrub); addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode); + if (flags & FLAG_ADJUST_ROOM) { + addattr16(&req.n, sizeof(req), IFLA_NETKIT_HEADROOM, NETKIT_HEADROOM); + addattr16(&req.n, sizeof(req), IFLA_NETKIT_TAILROOM, NETKIT_TAILROOM); + } addattr_nest_end(&req.n, data); addattr_nest_end(&req.n, linkinfo); @@ -87,7 +97,7 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, " addr ee:ff:bb:cc:aa:dd"), "set hwaddress"); } - if (same_netns) { + if (flags & FLAG_SAME_NETNS) { ASSERT_OK(system("ip link set dev " netkit_peer " up"), "up peer"); ASSERT_OK(system("ip addr add dev " netkit_peer " 10.0.0.2/24"), @@ -184,8 +194,8 @@ void serial_test_tc_netkit_basic(void) int err, ifindex; err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -299,8 +309,8 @@ static void serial_test_tc_netkit_multi_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -428,8 +438,8 @@ static void serial_test_tc_netkit_multi_opts_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -543,8 +553,8 @@ void serial_test_tc_netkit_device(void) int err, ifindex, ifindex2; err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS, - &ifindex, true, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS); if (err) return; @@ -655,8 +665,8 @@ static void serial_test_tc_netkit_neigh_links_target(int mode, int target) int err, ifindex; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, 0); if (err) return; @@ -733,8 +743,8 @@ static void serial_test_tc_netkit_pkt_type_mode(int mode) struct bpf_link *link; err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, - &ifindex, true, NETKIT_SCRUB_DEFAULT, - NETKIT_SCRUB_DEFAULT); + &ifindex, NETKIT_SCRUB_DEFAULT, + NETKIT_SCRUB_DEFAULT, FLAG_SAME_NETNS); if (err) return; @@ -799,7 +809,7 @@ void serial_test_tc_netkit_pkt_type(void) serial_test_tc_netkit_pkt_type_mode(NETKIT_L3); } -static void serial_test_tc_netkit_scrub_type(int scrub) +static void serial_test_tc_netkit_scrub_type(int scrub, bool room) { LIBBPF_OPTS(bpf_netkit_opts, optl); struct test_tc_link *skel; @@ -807,7 +817,8 @@ static void serial_test_tc_netkit_scrub_type(int scrub) int err, ifindex; err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, - &ifindex, false, scrub, scrub); + &ifindex, scrub, scrub, + room ? 
FLAG_ADJUST_ROOM : 0); if (err) return; @@ -842,6 +853,8 @@ static void serial_test_tc_netkit_scrub_type(int scrub) ASSERT_EQ(skel->bss->seen_tc8, true, "seen_tc8"); ASSERT_EQ(skel->bss->mark, scrub == NETKIT_SCRUB_NONE ? MARK : 0, "mark"); ASSERT_EQ(skel->bss->prio, scrub == NETKIT_SCRUB_NONE ? PRIO : 0, "prio"); + ASSERT_EQ(skel->bss->headroom, room ? NETKIT_HEADROOM : 0, "headroom"); + ASSERT_EQ(skel->bss->tailroom, room ? NETKIT_TAILROOM : 0, "tailroom"); cleanup: test_tc_link__destroy(skel); @@ -852,6 +865,6 @@ cleanup: void serial_test_tc_netkit_scrub(void) { - serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT); - serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE); + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_DEFAULT, false); + serial_test_tc_netkit_scrub_type(NETKIT_SCRUB_NONE, true); } diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 3ee40ee9413a..8a0e1ff8a2dc 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -52,6 +52,8 @@ #include "verifier_map_ptr_mixing.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" +#include "verifier_may_goto_1.skel.h" +#include "verifier_may_goto_2.skel.h" #include "verifier_meta_access.skel.h" #include "verifier_movsx.skel.h" #include "verifier_mtu.skel.h" @@ -98,6 +100,7 @@ #include "verifier_xdp_direct_packet_access.skel.h" #include "verifier_bits_iter.skel.h" #include "verifier_lsm.skel.h" +#include "irq.skel.h" #define MAX_ENTRIES 11 @@ -181,6 +184,8 @@ void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } +void test_verifier_may_goto_1(void) { RUN(verifier_may_goto_1); } +void test_verifier_may_goto_2(void) { RUN(verifier_may_goto_2); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_movsx(void) { RUN(verifier_movsx); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } @@ -225,6 +230,7 @@ void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } void test_verifier_lsm(void) { RUN(verifier_lsm); } +void test_irq(void) { RUN(irq); } void test_verifier_mtu(void) { RUN(verifier_mtu); } static int init_test_val_map(struct bpf_object *obj, char *map_name) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 53d6ad8c2257..b2b2d85dbb1b 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -82,6 +82,8 @@ static void test_xdp_adjust_tail_grow2(void) /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ #if defined(__s390x__) int tailroom = 512; +#elif defined(__powerpc__) + int tailroom = 384; #else int tailroom = 320; #endif diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index 6d8b54124cb3..fb952703653e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -17,7 +17,7 @@ #include "network_helpers.h" #include <linux/if_bonding.h> #include <linux/limits.h> 
-#include <linux/udp.h> +#include <netinet/udp.h> #include <uapi/linux/netdev.h> #include "xdp_dummy.skel.h" diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c index e6a783c7f5db..937da9b7532a 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c @@ -2,6 +2,14 @@ #include <test_progs.h> #include <network_helpers.h> #include "test_xdp_context_test_run.skel.h" +#include "test_xdp_meta.skel.h" + +#define TX_ADDR "10.0.0.1" +#define RX_ADDR "10.0.0.2" +#define RX_NAME "veth0" +#define TX_NAME "veth1" +#define TX_NETNS "xdp_context_tx" +#define RX_NETNS "xdp_context_rx" void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, __u32 data_meta, __u32 data, __u32 data_end, @@ -103,3 +111,82 @@ void test_xdp_context_test_run(void) test_xdp_context_test_run__destroy(skel); } + +void test_xdp_context_functional(void) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + struct netns_obj *rx_ns = NULL, *tx_ns = NULL; + struct bpf_program *tc_prog, *xdp_prog; + struct test_xdp_meta *skel = NULL; + struct nstoken *nstoken = NULL; + int rx_ifindex; + int ret; + + tx_ns = netns_new(TX_NETNS, false); + if (!ASSERT_OK_PTR(tx_ns, "create tx_ns")) + return; + + rx_ns = netns_new(RX_NETNS, false); + if (!ASSERT_OK_PTR(rx_ns, "create rx_ns")) + goto close; + + SYS(close, "ip link add " RX_NAME " netns " RX_NETNS + " type veth peer name " TX_NAME " netns " TX_NETNS); + + nstoken = open_netns(RX_NETNS); + if (!ASSERT_OK_PTR(nstoken, "setns rx_ns")) + goto close; + + SYS(close, "ip addr add " RX_ADDR "/24 dev " RX_NAME); + SYS(close, "ip link set dev " RX_NAME " up"); + + skel = test_xdp_meta__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + rx_ifindex = if_nametoindex(RX_NAME); + if (!ASSERT_GE(rx_ifindex, 0, "if_nametoindex rx")) + goto close; + + tc_hook.ifindex = rx_ifindex; + ret = bpf_tc_hook_create(&tc_hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto close; + + tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls"); + if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog")) + goto close; + + tc_opts.prog_fd = bpf_program__fd(tc_prog); + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto close; + + xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp"); + if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog")) + goto close; + + ret = bpf_xdp_attach(rx_ifindex, + bpf_program__fd(xdp_prog), + 0, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + close_netns(nstoken); + + nstoken = open_netns(TX_NETNS); + if (!ASSERT_OK_PTR(nstoken, "setns tx_ns")) + goto close; + + SYS(close, "ip addr add " TX_ADDR "/24 dev " TX_NAME); + SYS(close, "ip link set dev " TX_NAME " up"); + ASSERT_OK(SYS_NOFAIL("ping -c 1 " RX_ADDR), "ping"); + +close: + close_netns(nstoken); + test_xdp_meta__destroy(skel); + netns_free(rx_ns); + netns_free(tx_ns); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c index c7f74f068e78..df27535995af 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -52,10 +52,10 @@ static void test_xdp_with_cpumap_helpers(void) ASSERT_EQ(info.id, 
val.bpf_prog.id, "Match program id to cpumap entry prog_id"); /* send a packet to trigger any potential bugs in there */ - char data[10] = {}; + char data[ETH_HLEN] = {}; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c index 27ffed17d4be..461ab18705d5 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c @@ -23,7 +23,7 @@ static void test_xdp_with_devmap_helpers(void) __u32 len = sizeof(info); int err, dm_fd, dm_fd_redir, map_fd; struct nstoken *nstoken = NULL; - char data[10] = {}; + char data[ETH_HLEN] = {}; __u32 idx = 0; SYS(out_close, "ip netns add %s", TEST_NS); @@ -58,7 +58,7 @@ static void test_xdp_with_devmap_helpers(void) /* send a packet to trigger any potential bugs in there */ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); @@ -158,7 +158,7 @@ static void test_xdp_with_devmap_helpers_veth(void) struct nstoken *nstoken = NULL; __u32 len = sizeof(info); int err, dm_fd, dm_fd_redir, map_fd, ifindex_dst; - char data[10] = {}; + char data[ETH_HLEN] = {}; __u32 idx = 0; SYS(out_close, "ip netns add %s", TEST_NS); @@ -208,7 +208,7 @@ static void test_xdp_with_devmap_helpers_veth(void) /* send a packet to trigger any potential bugs in there */ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, - .data_size_in = 10, + .data_size_in = sizeof(data), .flags = BPF_F_TEST_XDP_LIVE_FRAMES, .repeat = 1, ); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index bad0ea167be7..7dac044664ac 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -7,10 +7,11 @@ #include <linux/if_link.h> #include <linux/ipv6.h> #include <linux/in6.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_endian.h> #include <uapi/linux/netdev.h> #include "test_xdp_do_redirect.skel.h" +#include "xdp_dummy.skel.h" struct udp_packet { struct ethhdr eth; @@ -246,3 +247,166 @@ out: SYS_NOFAIL("ip netns del testns"); test_xdp_do_redirect__destroy(skel); } + +#define NS_NB 3 +#define NS0 "NS0" +#define NS1 "NS1" +#define NS2 "NS2" +#define IPV4_NETWORK "10.1.1" +#define VETH1_INDEX 111 +#define VETH2_INDEX 222 + +struct test_data { + struct netns_obj *ns[NS_NB]; + u32 xdp_flags; +}; + +static void cleanup(struct test_data *data) +{ + int i; + + for (i = 0; i < NS_NB; i++) + netns_free(data->ns[i]); +} + +/** + * ping_setup - + * Create two veth peers and forward packets in-between using XDP + * + * ------------ ------------ + * | NS1 | | NS2 | + * | veth0 | | veth0 | + * | 10.1.1.1 | | 10.1.1.2 | + * -----|------ ------|----- + * | | + * | | + * -----|-----------------------|------- + * | veth1 veth2 | + * | (id:111) (id:222) | + * | | | | + * | ----- xdp forwarding ----- | + * | | + * | NS0 | + * ------------------------------------- + */ +static int ping_setup(struct test_data *data) +{ + int i; + + data->ns[0] = netns_new(NS0, false); + if (!ASSERT_OK_PTR(data->ns[0], "create ns")) + return -1; + + for (i = 1; i < NS_NB; i++) { + char ns_name[4] = {}; + + snprintf(ns_name, 4, "NS%d", 
i); + data->ns[i] = netns_new(ns_name, false); + if (!ASSERT_OK_PTR(data->ns[i], "create ns")) + goto fail; + + SYS(fail, + "ip -n %s link add veth%d index %d%d%d type veth peer name veth0 netns %s", + NS0, i, i, i, i, ns_name); + SYS(fail, "ip -n %s link set veth%d up", NS0, i); + + SYS(fail, "ip -n %s addr add %s.%d/24 dev veth0", ns_name, IPV4_NETWORK, i); + SYS(fail, "ip -n %s link set veth0 up", ns_name); + } + + return 0; + +fail: + cleanup(data); + return -1; +} + +static void ping_test(struct test_data *data) +{ + struct test_xdp_do_redirect *skel = NULL; + struct xdp_dummy *skel_dummy = NULL; + struct nstoken *nstoken = NULL; + int i, ret; + + skel_dummy = xdp_dummy__open_and_load(); + if (!ASSERT_OK_PTR(skel_dummy, "open and load xdp_dummy skeleton")) + goto close; + + for (i = 1; i < NS_NB; i++) { + char ns_name[4] = {}; + + snprintf(ns_name, 4, "NS%d", i); + nstoken = open_netns(ns_name); + if (!ASSERT_OK_PTR(nstoken, "open ns")) + goto close; + + ret = bpf_xdp_attach(if_nametoindex("veth0"), + bpf_program__fd(skel_dummy->progs.xdp_dummy_prog), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach dummy_prog")) + goto close; + + close_netns(nstoken); + nstoken = NULL; + } + + skel = test_xdp_do_redirect__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "open NS0")) + goto close; + + ret = bpf_xdp_attach(VETH2_INDEX, + bpf_program__fd(skel->progs.xdp_redirect_to_111), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + ret = bpf_xdp_attach(VETH1_INDEX, + bpf_program__fd(skel->progs.xdp_redirect_to_222), + data->xdp_flags, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + close_netns(nstoken); + nstoken = NULL; + + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "open NS1")) + goto close; + + SYS(close, "ping -c 1 %s.2 > /dev/null", IPV4_NETWORK); + +close: + close_netns(nstoken); + xdp_dummy__destroy(skel_dummy); + test_xdp_do_redirect__destroy(skel); +} + + +static void xdp_redirect_ping(u32 xdp_flags) +{ + struct test_data data = {}; + + if (ping_setup(&data) < 0) + return; + + data.xdp_flags = xdp_flags; + ping_test(&data); + cleanup(&data); +} + +void test_xdp_index_redirect(void) +{ + if (test__start_subtest("noflag")) + xdp_redirect_ping(0); + + if (test__start_subtest("drvflag")) + xdp_redirect_ping(XDP_FLAGS_DRV_MODE); + + if (test__start_subtest("skbflag")) + xdp_redirect_ping(XDP_FLAGS_SKB_MODE); +} + diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c index e1bf141d3401..3f9146d83d79 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c @@ -3,7 +3,7 @@ #include <network_helpers.h> #include <bpf/btf.h> #include <linux/if_link.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <net/if.h> #include <unistd.h> diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c index c87ee2bf558c..3d47878ef6bf 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c @@ -10,7 +10,7 @@ #include <linux/errqueue.h> #include <linux/if_link.h> #include <linux/net_tstamp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <sys/mman.h> #include <net/if.h> #include <poll.h> @@ -133,23 +133,6 @@ static void close_xsk(struct 
xsk *xsk) munmap(xsk->umem_area, UMEM_SIZE); } -static void ip_csum(struct iphdr *iph) -{ - __u32 sum = 0; - __u16 *p; - int i; - - iph->check = 0; - p = (void *)iph; - for (i = 0; i < sizeof(*iph) / sizeof(*p); i++) - sum += p[i]; - - while (sum >> 16) - sum = (sum & 0xffff) + (sum >> 16); - - iph->check = ~sum; -} - static int generate_packet(struct xsk *xsk, __u16 dst_port) { struct xsk_tx_metadata *meta; @@ -192,7 +175,7 @@ static int generate_packet(struct xsk *xsk, __u16 dst_port) iph->protocol = IPPROTO_UDP; ASSERT_EQ(inet_pton(FAMILY, TX_ADDR, &iph->saddr), 1, "inet_pton(TX_ADDR)"); ASSERT_EQ(inet_pton(FAMILY, RX_ADDR, &iph->daddr), 1, "inet_pton(RX_ADDR)"); - ip_csum(iph); + iph->check = build_ip_csum(iph); udph->source = htons(UDP_SOURCE_PORT); udph->dest = htons(dst_port); diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c index b7e175cd0af0..b3f77b4561c8 100644 --- a/tools/testing/selftests/bpf/progs/bad_struct_ops.c +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 56c764df8196..5d6fc7f01ebb 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -2,7 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr *ptr; diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c new file mode 100644 index 000000000000..e32b07d802bb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cgroup_skb_direct_packet_access.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +__u32 data_end; + +SEC("cgroup_skb/ingress") +int direct_packet_access(struct __sk_buff *skb) +{ + data_end = skb->data_end; + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index dfd817d0348c..bd8f15229f5c 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -192,7 +192,7 @@ done: /* Can't add a dynptr to a map */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int add_dynptr_to_map1(void *ctx) { struct bpf_dynptr ptr; @@ -210,7 +210,7 @@ int add_dynptr_to_map1(void *ctx) /* Can't add a struct with an embedded dynptr to a map */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int add_dynptr_to_map2(void *ctx) { struct test_info x; @@ -398,7 +398,7 @@ int data_slice_missing_null_check2(void *ctx) * dynptr argument */ SEC("?raw_tp") -__failure __msg("invalid indirect read from stack") +__failure __msg("invalid read from stack") int invalid_helper1(void *ctx) { struct bpf_dynptr ptr; diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c index 33d3a57bee90..35fec7c75bef 100644 --- 
a/tools/testing/selftests/bpf/progs/epilogue_exit.c +++ b/tools/testing/selftests/bpf/progs/epilogue_exit.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c index 7275dd594de0..153514691ba4 100644 --- a/tools/testing/selftests/bpf/progs/epilogue_tailcall.c +++ b/tools/testing/selftests/bpf/progs/epilogue_tailcall.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c index fe0f3fa5aab6..8a0fdff89927 100644 --- a/tools/testing/selftests/bpf/progs/exceptions_fail.c +++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c @@ -131,7 +131,7 @@ int reject_subprog_with_lock(void *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") int reject_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); @@ -147,7 +147,7 @@ __noinline static int throwing_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_rcu_read_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_rcu_read_lock-ed region") int reject_subprog_with_rcu_read_lock(void *ctx) { bpf_rcu_read_lock(); diff --git a/tools/testing/selftests/bpf/progs/find_vma.c b/tools/testing/selftests/bpf/progs/find_vma.c index 38034fb82530..02b82774469c 100644 --- a/tools/testing/selftests/bpf/progs/find_vma.c +++ b/tools/testing/selftests/bpf/progs/find_vma.c @@ -25,7 +25,7 @@ static long check_vma(struct task_struct *task, struct vm_area_struct *vma, { if (vma->vm_file) bpf_probe_read_kernel_str(d_iname, DNAME_INLINE_LEN - 1, - vma->vm_file->f_path.dentry->d_iname); + vma->vm_file->f_path.dentry->d_shortname.string); /* check for VM_EXEC */ if (vma->vm_flags & VM_EXEC) diff --git a/tools/testing/selftests/bpf/progs/free_timer.c b/tools/testing/selftests/bpf/progs/free_timer.c new file mode 100644 index 000000000000..4501ae8fc414 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/free_timer.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#include <linux/bpf.h> +#include <time.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#define MAX_ENTRIES 8 + +struct map_value { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, MAX_ENTRIES); +} map SEC(".maps"); + +static int timer_cb(void *map, void *key, struct map_value *value) +{ + volatile int sum = 0; + int i; + + bpf_for(i, 0, 1024 * 1024) sum += i; + + return 0; +} + +static int start_cb(int key) +{ + struct map_value *value; + + value = bpf_map_lookup_elem(&map, (void *)&key); + if (!value) + return 0; + + bpf_timer_init(&value->timer, &map, CLOCK_MONOTONIC); + bpf_timer_set_callback(&value->timer, timer_cb); + /* Hope 100us will be enough to wake-up and run the overwrite thread */ + bpf_timer_start(&value->timer, 100000, BPF_F_TIMER_CPU_PIN); + + return 0; +} + +static int overwrite_cb(int key) +{ + struct map_value zero = {}; + + /* Free the timer which may run on other CPU */ + bpf_map_update_elem(&map, (void *)&key, &zero, BPF_ANY); + + return 0; +} + +SEC("syscall") +int BPF_PROG(start_timer) +{ + bpf_loop(MAX_ENTRIES, start_cb, NULL, 0); + return 0; +} + +SEC("syscall") +int BPF_PROG(overwrite_timer) +{ + bpf_loop(MAX_ENTRIES, overwrite_cb, NULL, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/irq.c b/tools/testing/selftests/bpf/progs/irq.c new file mode 100644 index 000000000000..b0b53d980964 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/irq.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +unsigned long global_flags; + +extern void bpf_local_irq_save(unsigned long *) __weak __ksym; +extern void bpf_local_irq_restore(unsigned long *) __weak __ksym; +extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym; + +SEC("?tc") +__failure __msg("arg#0 doesn't point to an irq flag on stack") +int irq_save_bad_arg(struct __sk_buff *ctx) +{ + bpf_local_irq_save(&global_flags); + return 0; +} + +SEC("?tc") +__failure __msg("arg#0 doesn't point to an irq flag on stack") +int irq_restore_bad_arg(struct __sk_buff *ctx) +{ + bpf_local_irq_restore(&global_flags); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_2(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_save(&flags3); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_minus_2(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + return 0; 
+} + +static __noinline void local_irq_save(unsigned long *flags) +{ + bpf_local_irq_save(flags); +} + +static __noinline void local_irq_restore(unsigned long *flags) +{ + bpf_local_irq_restore(flags); +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_1_subprog(struct __sk_buff *ctx) +{ + unsigned long flags; + + local_irq_save(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_2_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + local_irq_save(&flags1); + local_irq_save(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + return 0; +} + +SEC("?tc") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_local_irq_save-ed region") +int irq_restore_missing_3_minus_2_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + return 0; +} + +SEC("?tc") +__success +int irq_balance(struct __sk_buff *ctx) +{ + unsigned long flags; + + local_irq_save(&flags); + local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__success +int irq_balance_n(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + local_irq_restore(&flags1); + return 0; +} + +static __noinline void local_irq_balance(void) +{ + unsigned long flags; + + local_irq_save(&flags); + local_irq_restore(&flags); +} + +static __noinline void local_irq_balance_n(void) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save(&flags1); + local_irq_save(&flags2); + local_irq_save(&flags3); + local_irq_restore(&flags3); + local_irq_restore(&flags2); + local_irq_restore(&flags1); +} + +SEC("?tc") +__success +int irq_balance_subprog(struct __sk_buff *ctx) +{ + local_irq_balance(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("sleepable helper bpf_copy_from_user#") +int irq_sleepable_helper(void *ctx) +{ + unsigned long flags; + u32 data; + + local_irq_save(&flags); + bpf_copy_from_user(&data, sizeof(data), NULL); + local_irq_restore(&flags); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("kernel func bpf_copy_from_user_str is sleepable within IRQ-disabled region") +int irq_sleepable_kfunc(void *ctx) +{ + unsigned long flags; + u32 data; + + local_irq_save(&flags); + bpf_copy_from_user_str(&data, sizeof(data), NULL, 0); + local_irq_restore(&flags); + return 0; +} + +int __noinline global_local_irq_balance(void) +{ + local_irq_balance_n(); + return 0; +} + +SEC("?tc") +__failure __msg("global function calls are not allowed with IRQs disabled") +int irq_global_subprog(struct __sk_buff *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + global_local_irq_balance(); + 
bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_restore(&flags1); + bpf_local_irq_restore(&flags2); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo_3(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags2); + bpf_local_irq_restore(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags1); + bpf_local_irq_restore(&flags3); + return 0; +} + +static __noinline void local_irq_save_3(unsigned long *flags1, unsigned long *flags2, + unsigned long *flags3) +{ + local_irq_save(flags1); + local_irq_save(flags2); + local_irq_save(flags3); +} + +SEC("?tc") +__success +int irq_restore_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_4_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + unsigned long flags4; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_save(&flags4); + bpf_local_irq_restore(&flags4); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_restore_ooo_3_subprog(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags2; + unsigned long flags3; + + local_irq_save_3(&flags1, &flags2, &flags3); + bpf_local_irq_restore(&flags3); + bpf_local_irq_restore(&flags2); + bpf_local_irq_save(&flags3); + bpf_local_irq_restore(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_restore_invalid(struct __sk_buff *ctx) +{ + unsigned long flags1; + unsigned long flags = 0xfaceb00c; + + bpf_local_irq_save(&flags1); + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("expected uninitialized") +int irq_save_invalid(struct __sk_buff *ctx) +{ + unsigned long flags1; + + bpf_local_irq_save(&flags1); + bpf_local_irq_save(&flags1); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_restore_iter(struct __sk_buff *ctx) +{ + struct bpf_iter_num it; + + bpf_iter_num_new(&it, 0, 42); + bpf_local_irq_restore((unsigned long *)&it); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=1") +int irq_save_iter(struct __sk_buff *ctx) +{ + struct bpf_iter_num it; + + /* Ensure same sized slot has st->ref_obj_id set, so we reject based on + * slot_type != STACK_IRQ_FLAG... 
+ */ + _Static_assert(sizeof(it) == sizeof(unsigned long), "broken iterator size"); + + bpf_iter_num_new(&it, 0, 42); + bpf_local_irq_save((unsigned long *)&it); + bpf_local_irq_restore((unsigned long *)&it); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_flag_overwrite(struct __sk_buff *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + flags = 0xdeadbeef; + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("expected an initialized") +int irq_flag_overwrite_partial(struct __sk_buff *ctx) +{ + unsigned long flags; + + bpf_local_irq_save(&flags); + *(((char *)&flags) + 1) = 0xff; + bpf_local_irq_restore(&flags); + return 0; +} + +SEC("?tc") +__failure __msg("cannot restore irq state out of order") +int irq_ooo_refs_array(struct __sk_buff *ctx) +{ + unsigned long flags[4]; + struct { int i; } *p; + + /* refs=1 */ + bpf_local_irq_save(&flags[0]); + + /* refs=1,2 */ + p = bpf_obj_new(typeof(*p)); + if (!p) { + bpf_local_irq_restore(&flags[0]); + return 0; + } + + /* refs=1,2,3 */ + bpf_local_irq_save(&flags[1]); + + /* refs=1,2,3,4 */ + bpf_local_irq_save(&flags[2]); + + /* Now when we remove ref=2, the verifier must not break the ordering in + * the refs array between 1,3,4. With an older implementation, the + * verifier would swap the last element with the removed element, but to + * maintain the stack property we need to use memmove. + */ + bpf_obj_drop(p); + + /* Save and restore to reset active_irq_id to 3, as the ordering is now + * refs=1,4,3. When restoring the linear scan will find prev_id in order + * as 3 instead of 4. + */ + bpf_local_irq_save(&flags[3]); + bpf_local_irq_restore(&flags[3]); + + /* With the incorrect implementation, we can release flags[1], flags[2], + * and flags[0], i.e. in the wrong order. 
+ */ + bpf_local_irq_restore(&flags[1]); + bpf_local_irq_restore(&flags[2]); + bpf_local_irq_restore(&flags[0]); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 7c969c127573..190822b2f08b 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -524,11 +524,11 @@ int iter_subprog_iters(const void *ctx) } struct { - __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); __type(value, int); __uint(max_entries, 1000); -} arr_map SEC(".maps"); +} hash_map SEC(".maps"); SEC("?raw_tp") __failure __msg("invalid mem access 'scalar'") @@ -539,7 +539,7 @@ int iter_err_too_permissive1(const void *ctx) MY_PID_GUARD(); - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (!map_val) return 0; @@ -561,12 +561,12 @@ int iter_err_too_permissive2(const void *ctx) MY_PID_GUARD(); - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (!map_val) return 0; bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); } *map_val = 123; @@ -585,7 +585,7 @@ int iter_err_too_permissive3(const void *ctx) MY_PID_GUARD(); bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); found = true; } @@ -606,7 +606,7 @@ int iter_tricky_but_fine(const void *ctx) MY_PID_GUARD(); bpf_repeat(1000000) { - map_val = bpf_map_lookup_elem(&arr_map, &key); + map_val = bpf_map_lookup_elem(&hash_map, &key); if (map_val) { found = true; break; diff --git a/tools/testing/selftests/bpf/progs/iters_testmod.c b/tools/testing/selftests/bpf/progs/iters_testmod.c index df1d3db60b1b..9e4b45201e69 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod.c @@ -4,7 +4,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index f9789e668297..82190d79de37 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c index 7632d9ecb253..b9670e9a6e3d 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_destructive_test(void) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index 08fae306539c..a1963497f0bf 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ 
b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -2,7 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct syscall_test_args { __u8 data[16]; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c index d532af07decf..48f64827cd93 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_fail(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index f502f755f567..8b86113a0126 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -2,7 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_test4(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c index 2380c75e74ce..8e150e85b50d 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" extern const int bpf_prog_active __ksym; int active_res = -1; diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index b092a72b2c9d..d736506a4c80 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -6,7 +6,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "../bpf_experimental.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct plain_local; diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index ab0ce1d01a4a..edaba481db9d 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -2,7 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 450bb373b179..4c0ff01f1a96 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -4,7 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" struct map_value { char buf[8]; @@ -345,7 +345,7 @@ int reject_indirect_global_func_access(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=5 alloc_insn=") +__failure 
__msg("Unreleased reference id=4 alloc_insn=") int kptr_xchg_ref_state(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c index 7f9ef701f5de..51a4fe64c917 100644 --- a/tools/testing/selftests/bpf/progs/missed_kprobe.c +++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c @@ -2,7 +2,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c index 8ea71cbd6c45..29c18d869ec1 100644 --- a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c +++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c @@ -2,7 +2,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; @@ -46,3 +46,9 @@ int test5(struct pt_regs *ctx) { return 0; } + +SEC("kprobe.session/bpf_kfunc_common_test") +int test6(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/nested_acquire.c b/tools/testing/selftests/bpf/progs/nested_acquire.c index 8e521a21d995..49ad7b9adf56 100644 --- a/tools/testing/selftests/bpf/progs/nested_acquire.c +++ b/tools/testing/selftests/bpf/progs/nested_acquire.c @@ -4,7 +4,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/preempt_lock.c b/tools/testing/selftests/bpf/progs/preempt_lock.c index 885377e83607..6c5797bf0ead 100644 --- a/tools/testing/selftests/bpf/progs/preempt_lock.c +++ b/tools/testing/selftests/bpf/progs/preempt_lock.c @@ -5,8 +5,10 @@ #include "bpf_misc.h" #include "bpf_experimental.h" +extern int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void *unsafe_ptr__ign, u64 flags) __weak __ksym; + SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -14,7 +16,7 @@ int preempt_lock_missing_1(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -23,7 +25,7 @@ int preempt_lock_missing_2(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_3(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -33,7 +35,7 @@ int preempt_lock_missing_3(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int 
preempt_lock_missing_3_minus_2(struct __sk_buff *ctx) { bpf_preempt_disable(); @@ -55,7 +57,7 @@ static __noinline void preempt_enable(void) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -63,7 +65,7 @@ int preempt_lock_missing_1_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -72,7 +74,7 @@ int preempt_lock_missing_2_subprog(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_preempt_disable-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_preempt_disable-ed region") int preempt_lock_missing_2_minus_1_subprog(struct __sk_buff *ctx) { preempt_disable(); @@ -113,6 +115,18 @@ int preempt_sleepable_helper(void *ctx) return 0; } +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("kernel func bpf_copy_from_user_str is sleepable within non-preemptible region") +int preempt_sleepable_kfunc(void *ctx) +{ + u32 data; + + bpf_preempt_disable(); + bpf_copy_from_user_str(&data, sizeof(data), NULL, 0); + bpf_preempt_enable(); + return 0; +} + int __noinline preempt_global_subprog(void) { preempt_balance_subprog(); diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue.c b/tools/testing/selftests/bpf/progs/pro_epilogue.c index 44bc3f06b4b6..d97d6e07ef5c 100644 --- a/tools/testing/selftests/bpf/progs/pro_epilogue.c +++ b/tools/testing/selftests/bpf/progs/pro_epilogue.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c index 3529e53be355..6048d79be48b 100644 --- a/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c +++ b/tools/testing/selftests/bpf/progs/pro_epilogue_goto_start.c @@ -4,8 +4,8 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sock_addr_kern.c b/tools/testing/selftests/bpf/progs/sock_addr_kern.c index 8386bb15ccdc..84ad515eafd6 100644 --- a/tools/testing/selftests/bpf/progs/sock_addr_kern.c +++ b/tools/testing/selftests/bpf/progs/sock_addr_kern.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Google LLC */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("syscall") int init_sock(struct init_sock_args *args) diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c index d7fdcabe7d90..284a5b008e0c 100644 --- 
a/tools/testing/selftests/bpf/progs/struct_ops_detach.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c index 3c822103bd40..d8cc99f5c2e2 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_forgotten_cb.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c index b450f72e744a..ccab3935aa42 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c index 6283099ec383..8b5515f4f724 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c index 4c56d4a9d9f4..71c420c3a5a6 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_module.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c index 9efcc6e4d356..5b23ea817f1f 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c index fa2021388485..5d0937fa07be 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_nulled_out_cb.c @@ -2,7 +2,7 @@ /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ #include <vmlinux.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c index 8ea57e5348ab..0e4d2ff63ab8 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c index 1f55ec4cee37..58d5d8dc2235 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_fail.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c index f2f300d50988..31e58389bb8b 100644 --- a/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c +++ b/tools/testing/selftests/bpf/progs/struct_ops_private_stack_recur.c @@ -3,7 +3,7 @@ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/syscall.c b/tools/testing/selftests/bpf/progs/syscall.c index 0f4dfb770c32..b698cc62a371 100644 --- a/tools/testing/selftests/bpf/progs/syscall.c +++ b/tools/testing/selftests/bpf/progs/syscall.c @@ -76,9 +76,9 @@ static int btf_load(void) .magic = BTF_MAGIC, .version = BTF_VERSION, .hdr_len = sizeof(struct btf_header), - .type_len = sizeof(__u32) * 8, - .str_off = sizeof(__u32) * 8, - .str_len = sizeof(__u32), + .type_len = sizeof(raw_btf.types), + .str_off = offsetof(struct btf_blob, str) - offsetof(struct btf_blob, types), + .str_len = sizeof(raw_btf.str), }, .types = { /* long */ diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index 683c8aaa63da..f344c6835e84 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -15,7 +15,7 @@ #include <linux/ipv6.h> #include <linux/pkt_cls.h> #include <linux/tcp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.h b/tools/testing/selftests/bpf/progs/test_cls_redirect.h index 233b089d1fba..eb55cb8a3dbd 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.h +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.h @@ -10,7 +10,7 @@ #include <linux/in.h> #include <linux/ip.h> #include <linux/ipv6.h> -#include <linux/udp.h> +#include <netinet/udp.h> /* offsetof() is used in static asserts, and the libbpf-redefined CO-RE * friendly version breaks compilation for older clang versions <= 15 diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c 
b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index 464515b824b9..d0f7670351e5 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -15,7 +15,7 @@ #include <linux/ipv6.h> #include <linux/pkt_cls.h> #include <linux/tcp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c index 6afa834756e9..fac33a14f200 100644 --- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c +++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c @@ -6,13 +6,20 @@ #include <stdbool.h> extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; +extern bool CONFIG_PPC_FTRACE_OUT_OF_LINE __kconfig __weak; +extern bool CONFIG_KPROBES_ON_FTRACE __kconfig __weak; +extern bool CONFIG_PPC64 __kconfig __weak; -/* This function is here to have CONFIG_X86_KERNEL_IBT - * used and added to object BTF. +/* This function is here to have CONFIG_X86_KERNEL_IBT, + * CONFIG_PPC_FTRACE_OUT_OF_LINE, CONFIG_KPROBES_ON_FTRACE, + * CONFIG_PPC64 used and added to object BTF. */ int unused(void) { - return CONFIG_X86_KERNEL_IBT ? 0 : 1; + return CONFIG_X86_KERNEL_IBT || + CONFIG_PPC_FTRACE_OUT_OF_LINE || + CONFIG_KPROBES_ON_FTRACE || + CONFIG_PPC64 ? 0 : 1; } SEC("kprobe") diff --git a/tools/testing/selftests/bpf/progs/test_global_func10.c b/tools/testing/selftests/bpf/progs/test_global_func10.c index 5da001ca57a5..09d027bd3ea8 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func10.c +++ b/tools/testing/selftests/bpf/progs/test_global_func10.c @@ -26,7 +26,7 @@ __noinline int foo(const struct Big *big) } SEC("cgroup_skb/ingress") -__failure __msg("invalid indirect access to stack") +__failure __msg("invalid read from stack") int global_func10(struct __sk_buff *skb) { const struct Small small = {.x = skb->len }; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c index 7ac7e1de34d8..0ad1bf1ede8d 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c @@ -4,7 +4,7 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" #include "bpf_kfuncs.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" SEC("tc") int kfunc_dynptr_nullable_test1(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index cc1a012d038f..fb07f5773888 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -5,7 +5,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" __u32 raw_tp_read_sz = 0; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_strp.c b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c new file mode 100644 index 000000000000..dde3d5bec515 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_strp.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +int verdict_max_size = 10000; +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + 
__type(key, int); + __type(value, int); +} sock_map SEC(".maps"); + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict(struct __sk_buff *skb) +{ + __u32 one = 1; + + if (skb->len > verdict_max_size) + return SK_PASS; + + return bpf_sk_redirect_map(skb, &sock_map, one, 0); +} + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict_pass(struct __sk_buff *skb) +{ + return SK_PASS; +} + +SEC("sk_skb/stream_parser") +int prog_skb_parser(struct __sk_buff *skb) +{ + return skb->len; +} + +SEC("sk_skb/stream_parser") +int prog_skb_parser_partial(struct __sk_buff *skb) +{ + /* agreement with the test program on a 4-byte size header + * and 6-byte body. + */ + if (skb->len < 4) { + /* need more header to determine full length */ + return 0; + } + /* Return the full length decoded from the header. + * The return value may be larger than skb->len, which means + * the framework must wait for the rest of the body to arrive. + */ + return 10; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c index 10d825928499..630f12e51b07 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_link.c +++ b/tools/testing/selftests/bpf/progs/test_tc_link.c @@ -8,6 +8,7 @@ #include <linux/if_packet.h> #include <bpf/bpf_endian.h> #include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> char LICENSE[] SEC("license") = "GPL"; @@ -27,6 +28,7 @@ bool seen_host; bool seen_mcast; int mark, prio; +unsigned short headroom, tailroom; SEC("tc/ingress") int tc1(struct __sk_buff *skb) @@ -104,11 +106,24 @@ out: return TCX_PASS; } +struct sk_buff { + struct net_device *dev; +}; + +struct net_device { + unsigned short needed_headroom; + unsigned short needed_tailroom; +}; + SEC("tc/egress") int tc8(struct __sk_buff *skb) { + struct net_device *dev = BPF_CORE_READ((struct sk_buff *)skb, dev); + seen_tc8 = true; mark = skb->mark; prio = skb->priority; + headroom = BPF_CORE_READ(dev, needed_headroom); + tailroom = BPF_CORE_READ(dev, needed_tailroom); return TCX_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c index bba3e37f749b..39ff06f2c834 100644 --- a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -3,7 +3,7 @@ #include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" #include "bpf_misc.h" SEC("tp_btf/bpf_testmod_test_nullable_bare") diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index 81bb38d72ced..dc74d8cf9e3f 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -10,6 +10,8 @@ int _xdp_adjust_tail_grow(struct xdp_md *xdp) /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ #if defined(__TARGET_ARCH_s390) int tailroom = 512; +#elif defined(__TARGET_ARCH_powerpc) + int tailroom = 384; #else int tailroom = 320; #endif diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c index 3abf068b8446..5928ed0911ca 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -98,6 +98,18 @@ int xdp_count_pkts(struct xdp_md *xdp) return XDP_DROP; } +SEC("xdp") +int 
xdp_redirect_to_111(struct xdp_md *xdp) +{ + return bpf_redirect(111, 0); +} + +SEC("xdp") +int xdp_redirect_to_222(struct xdp_md *xdp) +{ + return bpf_redirect(222, 0); +} + SEC("tc") int tc_count_pkts(struct __sk_buff *skb) { diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c index a7c4a7d49fe6..fe2d71ae0e71 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c @@ -8,7 +8,7 @@ #define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) #define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem -SEC("t") +SEC("tc") int ing_cls(struct __sk_buff *ctx) { __u8 *data, *data_meta, *data_end; @@ -28,7 +28,7 @@ int ing_cls(struct __sk_buff *ctx) return diff ? TC_ACT_SHOT : TC_ACT_OK; } -SEC("x") +SEC("xdp") int ing_xdp(struct xdp_md *ctx) { __u8 *data, *data_meta, *data_end; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_redirect.c deleted file mode 100644 index b778cad45485..000000000000 --- a/tools/testing/selftests/bpf/progs/test_xdp_redirect.c +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2017 VMware - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> - -SEC("redirect_to_111") -int xdp_redirect_to_111(struct xdp_md *xdp) -{ - return bpf_redirect(111, 0); -} -SEC("redirect_to_222") -int xdp_redirect_to_222(struct xdp_md *xdp) -{ - return bpf_redirect(222, 0); -} - -char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/uninit_stack.c b/tools/testing/selftests/bpf/progs/uninit_stack.c index 8a403470e557..046a204c8fc6 100644 --- a/tools/testing/selftests/bpf/progs/uninit_stack.c +++ b/tools/testing/selftests/bpf/progs/uninit_stack.c @@ -70,7 +70,8 @@ __naked int helper_uninit_to_misc(void *ctx) r1 = r10; \ r1 += -128; \ r2 = 32; \ - call %[bpf_trace_printk]; \ + r3 = 0; \ + call %[bpf_probe_read_user]; \ /* Call to dummy() forces print_verifier_state(..., true), \ * thus showing the stack state, matched by __msg(). 
\ */ \ @@ -79,7 +80,7 @@ __naked int helper_uninit_to_misc(void *ctx) exit; \ " : - : __imm(bpf_trace_printk), + : __imm(bpf_probe_read_user), __imm(dummy) : __clobber_all); } diff --git a/tools/testing/selftests/bpf/progs/unsupported_ops.c b/tools/testing/selftests/bpf/progs/unsupported_ops.c index 9180365a3568..8aa2e0dd624e 100644 --- a/tools/testing/selftests/bpf/progs/unsupported_ops.c +++ b/tools/testing/selftests/bpf/progs/unsupported_ops.c @@ -4,7 +4,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod.h" +#include "../test_kmods/bpf_testmod.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c index 4195aa824ba5..0a187ff725cc 100644 --- a/tools/testing/selftests/bpf/progs/verifier_array_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -29,6 +29,20 @@ struct { } map_array_wo SEC(".maps"); struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, struct test_val); +} map_array_pcpu SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, struct test_val); +} map_array SEC(".maps"); + +struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 1); __type(key, long long); @@ -525,4 +539,193 @@ l0_%=: exit; \ : __clobber_all); } +SEC("socket") +__description("valid map access into an array using constant without nullness") +__success __retval(4) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int an_array_with_a_constant_no_nullness(void) +{ + /* Need 8-byte alignment for spill tracking */ + __u32 __attribute__((aligned(8))) key = 1; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("valid multiple map access into an array using constant without nullness") +__success __retval(8) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}") +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int multiple_array_with_a_constant_no_nullness(void) +{ + __u32 __attribute__((aligned(8))) key = 1; + __u32 __attribute__((aligned(8))) key2 = 0; + struct test_val *val, *val2; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + val2 = bpf_map_lookup_elem(&map_array, &key2); + val2->index = offsetof(struct test_val, foo); + + return val->index + val2->index; +} + +SEC("socket") +__description("valid map access into an array using natural aligned 32-bit constant 0 without nullness") +__success __retval(4) +unsigned int an_array_with_a_32bit_constant_0_no_nullness(void) +{ + /* Unlike the above tests, 32-bit zeroing is precisely tracked even + * if writes are not aligned to BPF_REG_SIZE. This tests that our + * STACK_ZERO handling functions. 
+ */ + struct test_val *val; + __u32 key = 0; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("valid map access into a pcpu array using constant without nullness") +__success __retval(4) __log_level(2) +__msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}") +unsigned int a_pcpu_array_with_a_constant_no_nullness(void) +{ + __u32 __attribute__((aligned(8))) key = 1; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array_pcpu, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant without nullness") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void) +{ + /* Out of bounds */ + __u32 __attribute__((aligned(8))) key = 3; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant smaller than key_size") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_too_small(void) +{ + __u32 __attribute__((aligned(8))) key; + struct test_val *val; + + /* Mark entire key as STACK_MISC */ + bpf_probe_read_user(&key, sizeof(key), NULL); + + /* Spilling only the bottom byte results in a tnum const of 1. + * We want to check that the verifier rejects it, as the spill is < 4B. + */ + *(__u8 *)&key = 1; + val = bpf_map_lookup_elem(&map_array, &key); + + /* Should fail, as verifier cannot prove in-bound lookup */ + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid map access into an array using constant larger than key_size") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int an_array_with_a_constant_too_big(void) +{ + struct test_val *val; + __u64 key = 1; + + /* Even if the constant value is < max_entries, if the spill size is + * larger than the key size, the set bits may not be where we expect them + * to be on different endian architectures. + */ + val = bpf_map_lookup_elem(&map_array, &key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("invalid elided lookup using const and non-const key") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int mixed_const_and_non_const_key_lookup(void) +{ + __u32 __attribute__((aligned(8))) key; + struct test_val *val; + __u32 rand; + + rand = bpf_get_prandom_u32(); + key = rand > 42 ? 
1 : rand; + val = bpf_map_lookup_elem(&map_array, &key); + + return val->index; +} + +SEC("socket") +__failure __msg("invalid read from stack R2 off=4096 size=4") +__naked void key_lookup_at_invalid_fp(void) +{ + asm volatile (" \ + r1 = %[map_array] ll; \ + r2 = r10; \ + r2 += 4096; \ + call %[bpf_map_lookup_elem]; \ + r0 = *(u64*)(r0 + 0); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array) + : __clobber_all); +} + +volatile __u32 __attribute__((aligned(8))) global_key; + +SEC("socket") +__description("invalid elided lookup using non-stack key") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +unsigned int non_stack_key_lookup(void) +{ + struct test_val *val; + + global_key = 1; + val = bpf_map_lookup_elem(&map_array, (void *)&global_key); + val->index = offsetof(struct test_val, foo); + + return val->index; +} + +SEC("socket") +__description("doesn't reject UINT64_MAX as s64 for irrelevant maps") +__success __retval(42) +unsigned int doesnt_reject_irrelevant_maps(void) +{ + __u64 key = 0xFFFFFFFFFFFFFFFF; + struct test_val *val; + + val = bpf_map_lookup_elem(&map_hash_48b, &key); + if (val) + return val->index; + + return 42; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c index 8d77cc5323d3..fb62e09f2114 100644 --- a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c @@ -28,7 +28,7 @@ __naked void stack_out_of_bounds(void) SEC("socket") __description("uninitialized stack1") __success __log_level(4) __msg("stack depth 8") -__failure_unpriv __msg_unpriv("invalid indirect read from stack") +__failure_unpriv __msg_unpriv("invalid read from stack") __naked void uninitialized_stack1(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c index a0bb7fb40ea5..0eb33bb801b5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -1200,4 +1200,138 @@ l0_%=: r0 = 0; \ : __clobber_all); } +SEC("tc") +__description("multiply mixed sign bounds. test 1") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=0x1bc16d5cd4927ee1,smax=umax=0x1bc16d674ec80000,smax32=0x7ffffeff,umax32=0xfffffeff,var_off=(0x1bc16d4000000000; 0x3ffffffeff))") +__naked void mult_mixed0_sign(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xf;" + "r6 -= 1000000000;" + "r7 &= 0xf;" + "r7 -= 2000000000;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply mixed sign bounds. 
test 2") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=smin32=-100,smax=smax32=200)") +__naked void mult_mixed1_sign(void) +{ + asm volatile ( + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xf;" + "r6 -= 0xa;" + "r7 &= 0xf;" + "r7 -= 0x14;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply negative bounds") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar(smin=umin=smin32=umin32=0x3ff280b0,smax=umax=smax32=umax32=0x3fff0001,var_off=(0x3ff00000; 0xf81ff))") +__naked void mult_sign_bounds(void) +{ + asm volatile ( + "r8 = 0x7fff;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xa;" + "r6 -= r8;" + "r7 &= 0xf;" + "r7 -= r8;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiply bounds that don't cross signed boundary") +__success __log_level(2) +__msg("r8 *= r6 {{.*}}; R6_w=scalar(smin=smin32=0,smax=umax=smax32=umax32=11,var_off=(0x0; 0xb)) R8_w=scalar(smin=0,smax=umax=0x7b96bb0a94a3a7cd,var_off=(0x0; 0x7fffffffffffffff))") +__naked void mult_no_sign_crossing(void) +{ + asm volatile ( + "r6 = 0xb;" + "r8 = 0xb3c3f8c99262687 ll;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= r7;" + "r8 *= r6;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiplication overflow, result in unbounded reg. test 1") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar()") +__naked void mult_unsign_ovf(void) +{ + asm volatile ( + "r8 = 0x7ffffffffff ll;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0x7fffffff;" + "r7 &= r8;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} + +SEC("tc") +__description("multiplication overflow, result in unbounded reg. 
test 2") +__success __log_level(2) +__msg("r6 *= r7 {{.*}}; R6_w=scalar()") +__naked void mult_sign_ovf(void) +{ + asm volatile ( + "r8 = 0x7ffffffff ll;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + "r6 &= 0xa;" + "r6 -= r8;" + "r7 &= 0x7fffffff;" + "r6 *= r7;" + "exit" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_skb_store_bytes) + : __clobber_all); +} char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c index ba8922b2eebd..68c568c3c3a0 100644 --- a/tools/testing/selftests/bpf/progs/verifier_const_or.c +++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c @@ -25,7 +25,7 @@ __naked void constant_should_keep_constant_type(void) SEC("tracepoint") __description("constant register |= constant should not bypass stack boundary checks") -__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__failure __msg("invalid write to stack R1 off=-48 size=58") __naked void not_bypass_stack_boundary_checks_1(void) { asm volatile (" \ @@ -62,7 +62,7 @@ __naked void register_should_keep_constant_type(void) SEC("tracepoint") __description("constant register |= constant register should not bypass stack boundary checks") -__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__failure __msg("invalid write to stack R1 off=-48 size=58") __naked void not_bypass_stack_boundary_checks_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c index 50c6b22606f6..f2c54e4d89eb 100644 --- a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c +++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c @@ -67,7 +67,7 @@ SEC("socket") __description("helper access to variable memory: stack, bitwise AND, zero included") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__msg_unpriv("invalid read from stack R2 off -64+0 size 64") __retval(0) __naked void stack_bitwise_and_zero_included(void) { @@ -100,7 +100,7 @@ __naked void stack_bitwise_and_zero_included(void) SEC("tracepoint") __description("helper access to variable memory: stack, bitwise AND + JMP, wrong max") -__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void bitwise_and_jmp_wrong_max(void) { asm volatile (" \ @@ -187,7 +187,7 @@ l0_%=: r0 = 0; \ SEC("tracepoint") __description("helper access to variable memory: stack, JMP, bounds + offset") -__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void memory_stack_jmp_bounds_offset(void) { asm volatile (" \ @@ -211,7 +211,7 @@ l0_%=: r0 = 0; \ SEC("tracepoint") __description("helper access to variable memory: stack, JMP, wrong max") -__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__failure __msg("invalid write to stack R1 off=-64 size=65") __naked void memory_stack_jmp_wrong_max(void) { asm volatile (" \ @@ -260,7 +260,7 @@ SEC("socket") __description("helper access to variable memory: stack, JMP, no min check") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid 
indirect read from stack R2 off -64+0 size 64") +__msg_unpriv("invalid read from stack R2 off -64+0 size 64") __retval(0) __naked void stack_jmp_no_min_check(void) { @@ -750,7 +750,7 @@ SEC("socket") __description("helper access to variable memory: 8 bytes leak") /* in privileged mode reads from uninitialized stack locations are permitted */ __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64") +__msg_unpriv("invalid read from stack R2 off -64+32 size 64") __retval(0) __naked void variable_memory_8_bytes_leak(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c index 5f2efb895edb..59e34d558654 100644 --- a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -96,7 +96,7 @@ __naked void arg_ptr_to_long_misaligned(void) SEC("cgroup/sysctl") __description("arg pointer to long size < sizeof(long)") -__failure __msg("invalid indirect access to stack R4 off=-4 size=8") +__failure __msg("invalid write to stack R4 off=-4 size=8") __naked void to_long_size_sizeof_long(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c index 4eaab1468eb7..7d088ba99ea5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -47,7 +47,7 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("map in map state pruning") -__success __msg("processed 26 insns") +__success __msg("processed 15 insns") __log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) __naked void map_in_map_state_pruning(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c new file mode 100644 index 000000000000..e81097c96fe2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_1.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("raw_tp") +__description("may_goto 0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_simple(void) +{ + asm volatile ( + ".8byte %[may_goto];" + "r0 = 1;" + ".8byte %[may_goto];" + "exit;" + : + : __imm_insn(may_goto, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("batch 2 of may_goto 0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_batch_0(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto1];" + "r0 = 1;" + ".8byte %[may_goto1];" + ".8byte %[may_goto1];" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("may_goto batch with offsets 2/1/0") +__arch_x86_64 +__xlated("0: r0 = 1") +__xlated("1: exit") +__success +__naked void may_goto_batch_1(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto2];" + ".8byte %[may_goto3];" + "r0 = 1;" + ".8byte %[may_goto1];" + ".8byte %[may_goto2];" + ".8byte %[may_goto3];" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)), + __imm_insn(may_goto2, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 1 /* offset */, 0)), + __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +SEC("raw_tp") +__description("may_goto batch with offsets 2/0") +__arch_x86_64 +__xlated("0: *(u64 *)(r10 -8) = 8388608") +__xlated("1: r11 = *(u64 *)(r10 -8)") +__xlated("2: if r11 == 0x0 goto pc+3") +__xlated("3: r11 -= 1") +__xlated("4: *(u64 *)(r10 -8) = r11") +__xlated("5: r0 = 1") +__xlated("6: r0 = 2") +__xlated("7: exit") +__success +__naked void may_goto_batch_2(void) +{ + asm volatile ( + ".8byte %[may_goto1];" + ".8byte %[may_goto3];" + "r0 = 1;" + "r0 = 2;" + "exit;" + : + : __imm_insn(may_goto1, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 2 /* offset */, 0)), + __imm_insn(may_goto3, BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0 /* offset */, 0)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c new file mode 100644 index 000000000000..b891faf50660 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_may_goto_2.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +int gvar; + +SEC("raw_tp") +__description("C code with may_goto 0") +__success +int may_goto_c_code(void) +{ + int i, tmp[3]; + + for (i = 0; i < 3 && can_loop; i++) + tmp[i] = 0; + + for (i = 0; i < 3 && can_loop; i++) + tmp[i] = gvar - i; + + for (i = 0; i < 3 && can_loop; i++) + gvar += tmp[i]; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c index 4ccf1ebc42d1..256956ea1ac5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_mtu.c +++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c @@ -8,7 +8,7 @@ SEC("tc/ingress") __description("uninit/mtu: write rejected") __success __caps_unpriv(CAP_BPF|CAP_NET_ADMIN) -__failure_unpriv __msg_unpriv("invalid indirect read from stack") +__failure_unpriv __msg_unpriv("invalid read from stack") int tc_uninit_mtu(struct __sk_buff *ctx) { __u32 mtu; diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c index 7cc83acac727..c689665e07b9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c +++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c @@ -236,7 +236,7 @@ __naked void load_bytes_spilled_regs_data(void) SEC("tc") __description("raw_stack: skb_load_bytes, invalid access 1") -__failure __msg("invalid indirect access to stack R3 off=-513 size=8") +__failure __msg("invalid write to stack R3 off=-513 size=8") __naked void load_bytes_invalid_access_1(void) { asm volatile (" \ @@ -255,7 +255,7 @@ __naked void load_bytes_invalid_access_1(void) SEC("tc") __description("raw_stack: skb_load_bytes, invalid access 2") -__failure __msg("invalid indirect access to stack R3 off=-1 size=8") +__failure __msg("invalid write to stack R3 off=-1 size=8") __naked void load_bytes_invalid_access_2(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c index 3f679de73229..d9d7b05cf6d2 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -187,7 +187,7 @@ l0_%=: r6 = r0; \ SEC("cgroup/skb") __description("spin_lock: test6 missing unlock") -__failure __msg("BPF_EXIT instruction cannot be used inside bpf_spin_lock-ed region") +__failure __msg("BPF_EXIT instruction in main prog cannot be used inside bpf_spin_lock-ed region") __failure_unpriv __msg_unpriv("") __naked void spin_lock_test6_missing_unlock(void) { @@ -530,4 +530,30 @@ l1_%=: exit; \ : __clobber_all); } +SEC("tc") +__description("spin_lock: loop within a locked region") +__success __failure_unpriv __msg_unpriv("") +__retval(0) +int bpf_loop_inside_locked_region(void) +{ + const int zero = 0; + struct val *val; + int i, j = 0; + + val = bpf_map_lookup_elem(&map_spin_lock, &zero); + if (!val) + return -1; + + bpf_spin_lock(&val->l); + bpf_for(i, 0, 10) { + j++; + /* Silence "unused variable" warnings. 
*/ + if (j == 10) + break; + } + bpf_spin_unlock(&val->l); + + return 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c index 7ea535bfbacd..a4a5e2071604 100644 --- a/tools/testing/selftests/bpf/progs/verifier_unpriv.c +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -199,7 +199,7 @@ __naked void pass_pointer_to_helper_function(void) SEC("socket") __description("unpriv: indirectly pass pointer on stack to helper function") __success __failure_unpriv -__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8") +__msg_unpriv("invalid read from stack R2 off -8+0 size 8") __retval(0) __naked void on_stack_to_helper_function(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c index c810f4f6f479..1d36d01b746e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_var_off.c +++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c @@ -203,7 +203,7 @@ __naked void stack_write_clobbers_spilled_regs(void) SEC("sockops") __description("indirect variable-offset stack access, unbounded") -__failure __msg("invalid unbounded variable-offset indirect access to stack R4") +__failure __msg("invalid unbounded variable-offset write to stack R4") __naked void variable_offset_stack_access_unbounded(void) { asm volatile (" \ @@ -236,7 +236,7 @@ l0_%=: r0 = 0; \ SEC("lwt_in") __description("indirect variable-offset stack access, max out of bound") -__failure __msg("invalid variable-offset indirect access to stack R2") +__failure __msg("invalid variable-offset read from stack R2") __naked void access_max_out_of_bound(void) { asm volatile (" \ @@ -269,7 +269,7 @@ __naked void access_max_out_of_bound(void) */ SEC("socket") __description("indirect variable-offset stack access, zero-sized, max out of bound") -__failure __msg("invalid variable-offset indirect access to stack R1") +__failure __msg("invalid variable-offset write to stack R1") __naked void zero_sized_access_max_out_of_bound(void) { asm volatile (" \ @@ -294,7 +294,7 @@ __naked void zero_sized_access_max_out_of_bound(void) SEC("lwt_in") __description("indirect variable-offset stack access, min out of bound") -__failure __msg("invalid variable-offset indirect access to stack R2") +__failure __msg("invalid variable-offset read from stack R2") __naked void access_min_out_of_bound(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c index f8d3ae0c29ae..2f1ba08c293e 100644 --- a/tools/testing/selftests/bpf/progs/wq.c +++ b/tools/testing/selftests/bpf/progs/wq.c @@ -5,7 +5,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c index 25b51a72fe0f..4240211a1900 100644 --- a/tools/testing/selftests/bpf/progs/wq_failures.c +++ b/tools/testing/selftests/bpf/progs/wq_failures.c @@ -5,7 +5,7 @@ #include "bpf_experimental.h" #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#include "../bpf_testmod/bpf_testmod_kfunc.h" +#include "../test_kmods/bpf_testmod_kfunc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_bpftool_synctypes.py 
b/tools/testing/selftests/bpf/test_bpftool_synctypes.py index 0ed67b6b31dd..238121fda5b6 100755 --- a/tools/testing/selftests/bpf/test_bpftool_synctypes.py +++ b/tools/testing/selftests/bpf/test_bpftool_synctypes.py @@ -66,7 +66,7 @@ class ArrayParser(BlockParser): def __init__(self, reader, array_name): self.array_name = array_name - self.start_marker = re.compile(f'(static )?const bool {self.array_name}\[.*\] = {{\n') + self.start_marker = re.compile(fr'(static )?const bool {self.array_name}\[.*\] = {{\n') super().__init__(reader) def search_block(self): @@ -80,7 +80,7 @@ class ArrayParser(BlockParser): Parse a block and return data as a dictionary. Items to extract must be on separate lines in the file. """ - pattern = re.compile('\[(BPF_\w*)\]\s*= (true|false),?$') + pattern = re.compile(r'\[(BPF_\w*)\]\s*= (true|false),?$') entries = set() while True: line = self.reader.readline() @@ -178,7 +178,7 @@ class FileExtractor(object): @enum_name: name of the enum to parse """ start_marker = re.compile(f'enum {enum_name} {{\n') - pattern = re.compile('^\s*(BPF_\w+),?(\s+/\*.*\*/)?$') + pattern = re.compile(r'^\s*(BPF_\w+),?(\s+/\*.*\*/)?$') end_marker = re.compile('^};') parser = BlockParser(self.reader) parser.search_block(start_marker) @@ -226,8 +226,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'\*{block_name}\* := {{') - pattern = re.compile('\*\*([\w/-]+)\*\*') + start_marker = re.compile(fr'\*{block_name}\* := {{') + pattern = re.compile(r'\*\*([\w/-]+)\*\*') end_marker = re.compile('}\n') return self.__get_description_list(start_marker, pattern, end_marker) @@ -245,8 +245,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'"\s*{block_name} := {{') - pattern = re.compile('([\w/]+) [|}]') + start_marker = re.compile(fr'"\s*{block_name} := {{') + pattern = re.compile(r'([\w/]+) [|}]') end_marker = re.compile('}') return self.__get_description_list(start_marker, pattern, end_marker) @@ -264,8 +264,8 @@ class FileExtractor(object): @macro: macro starting the block, 'HELP_SPEC_OPTIONS' in the example """ - start_marker = re.compile(f'"\s*{macro}\s*" [|}}]') - pattern = re.compile('([\w-]+) ?(?:\||}[ }\]])') + start_marker = re.compile(fr'"\s*{macro}\s*" [|}}]') + pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]])') end_marker = re.compile('}\\\\n') return self.__get_description_list(start_marker, pattern, end_marker) @@ -283,8 +283,8 @@ class FileExtractor(object): @block_name: name of the blog to parse, 'TYPE' in the example """ - start_marker = re.compile(f'local {block_name}=\'') - pattern = re.compile('(?:.*=\')?([\w/]+)') + start_marker = re.compile(fr'local {block_name}=\'') + pattern = re.compile(r'(?:.*=\')?([\w/]+)') end_marker = re.compile('\'$') return self.__get_description_list(start_marker, pattern, end_marker) @@ -316,7 +316,7 @@ class MainHeaderFileExtractor(SourceFileExtractor): {'-p', '-d', '--pretty', '--debug', '--json', '-j'} """ start_marker = re.compile(f'"OPTIONS :=') - pattern = re.compile('([\w-]+) ?(?:\||}[ }\]"])') + pattern = re.compile(r'([\w-]+) ?(?:\||}[ }\]"])') end_marker = re.compile('#define') parser = InlineListParser(self.reader) @@ -338,8 +338,8 @@ class ManSubstitutionsExtractor(SourceFileExtractor): {'-p', '-d', '--pretty', '--debug', '--json', '-j'} """ - start_marker = re.compile('\|COMMON_OPTIONS\| replace:: {') - pattern = re.compile('\*\*([\w/-]+)\*\*') + start_marker = 
re.compile(r'\|COMMON_OPTIONS\| replace:: {') + pattern = re.compile(r'\*\*([\w/-]+)\*\*') end_marker = re.compile('}$') parser = InlineListParser(self.reader) diff --git a/tools/testing/selftests/bpf/test_flow_dissector.c b/tools/testing/selftests/bpf/test_flow_dissector.c deleted file mode 100644 index 571cc076dd7d..000000000000 --- a/tools/testing/selftests/bpf/test_flow_dissector.c +++ /dev/null @@ -1,780 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Inject packets with all sorts of encapsulation into the kernel. - * - * IPv4/IPv6 outer layer 3 - * GRE/GUE/BARE outer layer 4, where bare is IPIP/SIT/IPv4-in-IPv6/.. - * IPv4/IPv6 inner layer 3 - */ - -#define _GNU_SOURCE - -#include <stddef.h> -#include <arpa/inet.h> -#include <asm/byteorder.h> -#include <error.h> -#include <errno.h> -#include <linux/if_packet.h> -#include <linux/if_ether.h> -#include <linux/ipv6.h> -#include <netinet/ip.h> -#include <netinet/in.h> -#include <netinet/udp.h> -#include <poll.h> -#include <stdbool.h> -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <sys/ioctl.h> -#include <sys/socket.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <sys/types.h> -#include <unistd.h> - -#define CFG_PORT_INNER 8000 - -/* Add some protocol definitions that do not exist in userspace */ - -struct grehdr { - uint16_t unused; - uint16_t protocol; -} __attribute__((packed)); - -struct guehdr { - union { - struct { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 hlen:5, - control:1, - version:2; -#elif defined (__BIG_ENDIAN_BITFIELD) - __u8 version:2, - control:1, - hlen:5; -#else -#error "Please fix <asm/byteorder.h>" -#endif - __u8 proto_ctype; - __be16 flags; - }; - __be32 word; - }; -}; - -static uint8_t cfg_dsfield_inner; -static uint8_t cfg_dsfield_outer; -static uint8_t cfg_encap_proto; -static bool cfg_expect_failure = false; -static int cfg_l3_extra = AF_UNSPEC; /* optional SIT prefix */ -static int cfg_l3_inner = AF_UNSPEC; -static int cfg_l3_outer = AF_UNSPEC; -static int cfg_num_pkt = 10; -static int cfg_num_secs = 0; -static char cfg_payload_char = 'a'; -static int cfg_payload_len = 100; -static int cfg_port_gue = 6080; -static bool cfg_only_rx; -static bool cfg_only_tx; -static int cfg_src_port = 9; - -static char buf[ETH_DATA_LEN]; - -#define INIT_ADDR4(name, addr4, port) \ - static struct sockaddr_in name = { \ - .sin_family = AF_INET, \ - .sin_port = __constant_htons(port), \ - .sin_addr.s_addr = __constant_htonl(addr4), \ - }; - -#define INIT_ADDR6(name, addr6, port) \ - static struct sockaddr_in6 name = { \ - .sin6_family = AF_INET6, \ - .sin6_port = __constant_htons(port), \ - .sin6_addr = addr6, \ - }; - -INIT_ADDR4(in_daddr4, INADDR_LOOPBACK, CFG_PORT_INNER) -INIT_ADDR4(in_saddr4, INADDR_LOOPBACK + 2, 0) -INIT_ADDR4(out_daddr4, INADDR_LOOPBACK, 0) -INIT_ADDR4(out_saddr4, INADDR_LOOPBACK + 1, 0) -INIT_ADDR4(extra_daddr4, INADDR_LOOPBACK, 0) -INIT_ADDR4(extra_saddr4, INADDR_LOOPBACK + 1, 0) - -INIT_ADDR6(in_daddr6, IN6ADDR_LOOPBACK_INIT, CFG_PORT_INNER) -INIT_ADDR6(in_saddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(out_daddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(out_saddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(extra_daddr6, IN6ADDR_LOOPBACK_INIT, 0) -INIT_ADDR6(extra_saddr6, IN6ADDR_LOOPBACK_INIT, 0) - -static unsigned long util_gettime(void) -{ - struct timeval tv; - - gettimeofday(&tv, NULL); - return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); -} - -static void util_printaddr(const char *msg, struct sockaddr *addr) -{ - unsigned long off = 0; - char nbuf[INET6_ADDRSTRLEN]; 
- - switch (addr->sa_family) { - case PF_INET: - off = __builtin_offsetof(struct sockaddr_in, sin_addr); - break; - case PF_INET6: - off = __builtin_offsetof(struct sockaddr_in6, sin6_addr); - break; - default: - error(1, 0, "printaddr: unsupported family %u\n", - addr->sa_family); - } - - if (!inet_ntop(addr->sa_family, ((void *) addr) + off, nbuf, - sizeof(nbuf))) - error(1, errno, "inet_ntop"); - - fprintf(stderr, "%s: %s\n", msg, nbuf); -} - -static unsigned long add_csum_hword(const uint16_t *start, int num_u16) -{ - unsigned long sum = 0; - int i; - - for (i = 0; i < num_u16; i++) - sum += start[i]; - - return sum; -} - -static uint16_t build_ip_csum(const uint16_t *start, int num_u16, - unsigned long sum) -{ - sum += add_csum_hword(start, num_u16); - - while (sum >> 16) - sum = (sum & 0xffff) + (sum >> 16); - - return ~sum; -} - -static void build_ipv4_header(void *header, uint8_t proto, - uint32_t src, uint32_t dst, - int payload_len, uint8_t tos) -{ - struct iphdr *iph = header; - - iph->ihl = 5; - iph->version = 4; - iph->tos = tos; - iph->ttl = 8; - iph->tot_len = htons(sizeof(*iph) + payload_len); - iph->id = htons(1337); - iph->protocol = proto; - iph->saddr = src; - iph->daddr = dst; - iph->check = build_ip_csum((void *) iph, iph->ihl << 1, 0); -} - -static void ipv6_set_dsfield(struct ipv6hdr *ip6h, uint8_t dsfield) -{ - uint16_t val, *ptr = (uint16_t *)ip6h; - - val = ntohs(*ptr); - val &= 0xF00F; - val |= ((uint16_t) dsfield) << 4; - *ptr = htons(val); -} - -static void build_ipv6_header(void *header, uint8_t proto, - struct sockaddr_in6 *src, - struct sockaddr_in6 *dst, - int payload_len, uint8_t dsfield) -{ - struct ipv6hdr *ip6h = header; - - ip6h->version = 6; - ip6h->payload_len = htons(payload_len); - ip6h->nexthdr = proto; - ip6h->hop_limit = 8; - ipv6_set_dsfield(ip6h, dsfield); - - memcpy(&ip6h->saddr, &src->sin6_addr, sizeof(ip6h->saddr)); - memcpy(&ip6h->daddr, &dst->sin6_addr, sizeof(ip6h->daddr)); -} - -static uint16_t build_udp_v4_csum(const struct iphdr *iph, - const struct udphdr *udph, - int num_words) -{ - unsigned long pseudo_sum; - int num_u16 = sizeof(iph->saddr); /* halfwords: twice byte len */ - - pseudo_sum = add_csum_hword((void *) &iph->saddr, num_u16); - pseudo_sum += htons(IPPROTO_UDP); - pseudo_sum += udph->len; - return build_ip_csum((void *) udph, num_words, pseudo_sum); -} - -static uint16_t build_udp_v6_csum(const struct ipv6hdr *ip6h, - const struct udphdr *udph, - int num_words) -{ - unsigned long pseudo_sum; - int num_u16 = sizeof(ip6h->saddr); /* halfwords: twice byte len */ - - pseudo_sum = add_csum_hword((void *) &ip6h->saddr, num_u16); - pseudo_sum += htons(ip6h->nexthdr); - pseudo_sum += ip6h->payload_len; - return build_ip_csum((void *) udph, num_words, pseudo_sum); -} - -static void build_udp_header(void *header, int payload_len, - uint16_t dport, int family) -{ - struct udphdr *udph = header; - int len = sizeof(*udph) + payload_len; - - udph->source = htons(cfg_src_port); - udph->dest = htons(dport); - udph->len = htons(len); - udph->check = 0; - if (family == AF_INET) - udph->check = build_udp_v4_csum(header - sizeof(struct iphdr), - udph, len >> 1); - else - udph->check = build_udp_v6_csum(header - sizeof(struct ipv6hdr), - udph, len >> 1); -} - -static void build_gue_header(void *header, uint8_t proto) -{ - struct guehdr *gueh = header; - - gueh->proto_ctype = proto; -} - -static void build_gre_header(void *header, uint16_t proto) -{ - struct grehdr *greh = header; - - greh->protocol = htons(proto); -} - -static int 
l3_length(int family) -{ - if (family == AF_INET) - return sizeof(struct iphdr); - else - return sizeof(struct ipv6hdr); -} - -static int build_packet(void) -{ - int ol3_len = 0, ol4_len = 0, il3_len = 0, il4_len = 0; - int el3_len = 0; - - if (cfg_l3_extra) - el3_len = l3_length(cfg_l3_extra); - - /* calculate header offsets */ - if (cfg_encap_proto) { - ol3_len = l3_length(cfg_l3_outer); - - if (cfg_encap_proto == IPPROTO_GRE) - ol4_len = sizeof(struct grehdr); - else if (cfg_encap_proto == IPPROTO_UDP) - ol4_len = sizeof(struct udphdr) + sizeof(struct guehdr); - } - - il3_len = l3_length(cfg_l3_inner); - il4_len = sizeof(struct udphdr); - - if (el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len >= - sizeof(buf)) - error(1, 0, "packet too large\n"); - - /* - * Fill packet from inside out, to calculate correct checksums. - * But create ip before udp headers, as udp uses ip for pseudo-sum. - */ - memset(buf + el3_len + ol3_len + ol4_len + il3_len + il4_len, - cfg_payload_char, cfg_payload_len); - - /* add zero byte for udp csum padding */ - buf[el3_len + ol3_len + ol4_len + il3_len + il4_len + cfg_payload_len] = 0; - - switch (cfg_l3_inner) { - case PF_INET: - build_ipv4_header(buf + el3_len + ol3_len + ol4_len, - IPPROTO_UDP, - in_saddr4.sin_addr.s_addr, - in_daddr4.sin_addr.s_addr, - il4_len + cfg_payload_len, - cfg_dsfield_inner); - break; - case PF_INET6: - build_ipv6_header(buf + el3_len + ol3_len + ol4_len, - IPPROTO_UDP, - &in_saddr6, &in_daddr6, - il4_len + cfg_payload_len, - cfg_dsfield_inner); - break; - } - - build_udp_header(buf + el3_len + ol3_len + ol4_len + il3_len, - cfg_payload_len, CFG_PORT_INNER, cfg_l3_inner); - - if (!cfg_encap_proto) - return il3_len + il4_len + cfg_payload_len; - - switch (cfg_l3_outer) { - case PF_INET: - build_ipv4_header(buf + el3_len, cfg_encap_proto, - out_saddr4.sin_addr.s_addr, - out_daddr4.sin_addr.s_addr, - ol4_len + il3_len + il4_len + cfg_payload_len, - cfg_dsfield_outer); - break; - case PF_INET6: - build_ipv6_header(buf + el3_len, cfg_encap_proto, - &out_saddr6, &out_daddr6, - ol4_len + il3_len + il4_len + cfg_payload_len, - cfg_dsfield_outer); - break; - } - - switch (cfg_encap_proto) { - case IPPROTO_UDP: - build_gue_header(buf + el3_len + ol3_len + ol4_len - - sizeof(struct guehdr), - cfg_l3_inner == PF_INET ? IPPROTO_IPIP - : IPPROTO_IPV6); - build_udp_header(buf + el3_len + ol3_len, - sizeof(struct guehdr) + il3_len + il4_len + - cfg_payload_len, - cfg_port_gue, cfg_l3_outer); - break; - case IPPROTO_GRE: - build_gre_header(buf + el3_len + ol3_len, - cfg_l3_inner == PF_INET ? ETH_P_IP - : ETH_P_IPV6); - break; - } - - switch (cfg_l3_extra) { - case PF_INET: - build_ipv4_header(buf, - cfg_l3_outer == PF_INET ? IPPROTO_IPIP - : IPPROTO_IPV6, - extra_saddr4.sin_addr.s_addr, - extra_daddr4.sin_addr.s_addr, - ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len, 0); - break; - case PF_INET6: - build_ipv6_header(buf, - cfg_l3_outer == PF_INET ? 
IPPROTO_IPIP - : IPPROTO_IPV6, - &extra_saddr6, &extra_daddr6, - ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len, 0); - break; - } - - return el3_len + ol3_len + ol4_len + il3_len + il4_len + - cfg_payload_len; -} - -/* sender transmits encapsulated over RAW or unencap'd over UDP */ -static int setup_tx(void) -{ - int family, fd, ret; - - if (cfg_l3_extra) - family = cfg_l3_extra; - else if (cfg_l3_outer) - family = cfg_l3_outer; - else - family = cfg_l3_inner; - - fd = socket(family, SOCK_RAW, IPPROTO_RAW); - if (fd == -1) - error(1, errno, "socket tx"); - - if (cfg_l3_extra) { - if (cfg_l3_extra == PF_INET) - ret = connect(fd, (void *) &extra_daddr4, - sizeof(extra_daddr4)); - else - ret = connect(fd, (void *) &extra_daddr6, - sizeof(extra_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } else if (cfg_l3_outer) { - /* connect to destination if not encapsulated */ - if (cfg_l3_outer == PF_INET) - ret = connect(fd, (void *) &out_daddr4, - sizeof(out_daddr4)); - else - ret = connect(fd, (void *) &out_daddr6, - sizeof(out_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } else { - /* otherwise using loopback */ - if (cfg_l3_inner == PF_INET) - ret = connect(fd, (void *) &in_daddr4, - sizeof(in_daddr4)); - else - ret = connect(fd, (void *) &in_daddr6, - sizeof(in_daddr6)); - if (ret) - error(1, errno, "connect tx"); - } - - return fd; -} - -/* receiver reads unencapsulated UDP */ -static int setup_rx(void) -{ - int fd, ret; - - fd = socket(cfg_l3_inner, SOCK_DGRAM, 0); - if (fd == -1) - error(1, errno, "socket rx"); - - if (cfg_l3_inner == PF_INET) - ret = bind(fd, (void *) &in_daddr4, sizeof(in_daddr4)); - else - ret = bind(fd, (void *) &in_daddr6, sizeof(in_daddr6)); - if (ret) - error(1, errno, "bind rx"); - - return fd; -} - -static int do_tx(int fd, const char *pkt, int len) -{ - int ret; - - ret = write(fd, pkt, len); - if (ret == -1) - error(1, errno, "send"); - if (ret != len) - error(1, errno, "send: len (%d < %d)\n", ret, len); - - return 1; -} - -static int do_poll(int fd, short events, int timeout) -{ - struct pollfd pfd; - int ret; - - pfd.fd = fd; - pfd.events = events; - - ret = poll(&pfd, 1, timeout); - if (ret == -1) - error(1, errno, "poll"); - if (ret && !(pfd.revents & POLLIN)) - error(1, errno, "poll: unexpected event 0x%x\n", pfd.revents); - - return ret; -} - -static int do_rx(int fd) -{ - char rbuf; - int ret, num = 0; - - while (1) { - ret = recv(fd, &rbuf, 1, MSG_DONTWAIT); - if (ret == -1 && errno == EAGAIN) - break; - if (ret == -1) - error(1, errno, "recv"); - if (rbuf != cfg_payload_char) - error(1, 0, "recv: payload mismatch"); - num++; - } - - return num; -} - -static int do_main(void) -{ - unsigned long tstop, treport, tcur; - int fdt = -1, fdr = -1, len, tx = 0, rx = 0; - - if (!cfg_only_tx) - fdr = setup_rx(); - if (!cfg_only_rx) - fdt = setup_tx(); - - len = build_packet(); - - tcur = util_gettime(); - treport = tcur + 1000; - tstop = tcur + (cfg_num_secs * 1000); - - while (1) { - if (!cfg_only_rx) - tx += do_tx(fdt, buf, len); - - if (!cfg_only_tx) - rx += do_rx(fdr); - - if (cfg_num_secs) { - tcur = util_gettime(); - if (tcur >= tstop) - break; - if (tcur >= treport) { - fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); - tx = 0; - rx = 0; - treport = tcur + 1000; - } - } else { - if (tx == cfg_num_pkt) - break; - } - } - - /* read straggler packets, if any */ - if (rx < tx) { - tstop = util_gettime() + 100; - while (rx < tx) { - tcur = util_gettime(); - if (tcur >= tstop) - break; - - do_poll(fdr, POLLIN, tstop - tcur); - rx += 
do_rx(fdr); - } - } - - fprintf(stderr, "pkts: tx=%u rx=%u\n", tx, rx); - - if (fdr != -1 && close(fdr)) - error(1, errno, "close rx"); - if (fdt != -1 && close(fdt)) - error(1, errno, "close tx"); - - /* - * success (== 0) only if received all packets - * unless failure is expected, in which case none must arrive. - */ - if (cfg_expect_failure) - return rx != 0; - else - return rx != tx; -} - - -static void __attribute__((noreturn)) usage(const char *filepath) -{ - fprintf(stderr, "Usage: %s [-e gre|gue|bare|none] [-i 4|6] [-l len] " - "[-O 4|6] [-o 4|6] [-n num] [-t secs] [-R] [-T] " - "[-s <osrc> [-d <odst>] [-S <isrc>] [-D <idst>] " - "[-x <otos>] [-X <itos>] [-f <isport>] [-F]\n", - filepath); - exit(1); -} - -static void parse_addr(int family, void *addr, const char *optarg) -{ - int ret; - - ret = inet_pton(family, optarg, addr); - if (ret == -1) - error(1, errno, "inet_pton"); - if (ret == 0) - error(1, 0, "inet_pton: bad string"); -} - -static void parse_addr4(struct sockaddr_in *addr, const char *optarg) -{ - parse_addr(AF_INET, &addr->sin_addr, optarg); -} - -static void parse_addr6(struct sockaddr_in6 *addr, const char *optarg) -{ - parse_addr(AF_INET6, &addr->sin6_addr, optarg); -} - -static int parse_protocol_family(const char *filepath, const char *optarg) -{ - if (!strcmp(optarg, "4")) - return PF_INET; - if (!strcmp(optarg, "6")) - return PF_INET6; - - usage(filepath); -} - -static void parse_opts(int argc, char **argv) -{ - int c; - - while ((c = getopt(argc, argv, "d:D:e:f:Fhi:l:n:o:O:Rs:S:t:Tx:X:")) != -1) { - switch (c) { - case 'd': - if (cfg_l3_outer == AF_UNSPEC) - error(1, 0, "-d must be preceded by -o"); - if (cfg_l3_outer == AF_INET) - parse_addr4(&out_daddr4, optarg); - else - parse_addr6(&out_daddr6, optarg); - break; - case 'D': - if (cfg_l3_inner == AF_UNSPEC) - error(1, 0, "-D must be preceded by -i"); - if (cfg_l3_inner == AF_INET) - parse_addr4(&in_daddr4, optarg); - else - parse_addr6(&in_daddr6, optarg); - break; - case 'e': - if (!strcmp(optarg, "gre")) - cfg_encap_proto = IPPROTO_GRE; - else if (!strcmp(optarg, "gue")) - cfg_encap_proto = IPPROTO_UDP; - else if (!strcmp(optarg, "bare")) - cfg_encap_proto = IPPROTO_IPIP; - else if (!strcmp(optarg, "none")) - cfg_encap_proto = IPPROTO_IP; /* == 0 */ - else - usage(argv[0]); - break; - case 'f': - cfg_src_port = strtol(optarg, NULL, 0); - break; - case 'F': - cfg_expect_failure = true; - break; - case 'h': - usage(argv[0]); - break; - case 'i': - if (!strcmp(optarg, "4")) - cfg_l3_inner = PF_INET; - else if (!strcmp(optarg, "6")) - cfg_l3_inner = PF_INET6; - else - usage(argv[0]); - break; - case 'l': - cfg_payload_len = strtol(optarg, NULL, 0); - break; - case 'n': - cfg_num_pkt = strtol(optarg, NULL, 0); - break; - case 'o': - cfg_l3_outer = parse_protocol_family(argv[0], optarg); - break; - case 'O': - cfg_l3_extra = parse_protocol_family(argv[0], optarg); - break; - case 'R': - cfg_only_rx = true; - break; - case 's': - if (cfg_l3_outer == AF_INET) - parse_addr4(&out_saddr4, optarg); - else - parse_addr6(&out_saddr6, optarg); - break; - case 'S': - if (cfg_l3_inner == AF_INET) - parse_addr4(&in_saddr4, optarg); - else - parse_addr6(&in_saddr6, optarg); - break; - case 't': - cfg_num_secs = strtol(optarg, NULL, 0); - break; - case 'T': - cfg_only_tx = true; - break; - case 'x': - cfg_dsfield_outer = strtol(optarg, NULL, 0); - break; - case 'X': - cfg_dsfield_inner = strtol(optarg, NULL, 0); - break; - } - } - - if (cfg_only_rx && cfg_only_tx) - error(1, 0, "options: cannot combine rx-only and 
tx-only"); - - if (cfg_encap_proto && cfg_l3_outer == AF_UNSPEC) - error(1, 0, "options: must specify outer with encap"); - else if ((!cfg_encap_proto) && cfg_l3_outer != AF_UNSPEC) - error(1, 0, "options: cannot combine no-encap and outer"); - else if ((!cfg_encap_proto) && cfg_l3_extra != AF_UNSPEC) - error(1, 0, "options: cannot combine no-encap and extra"); - - if (cfg_l3_inner == AF_UNSPEC) - cfg_l3_inner = AF_INET6; - if (cfg_l3_inner == AF_INET6 && cfg_encap_proto == IPPROTO_IPIP) - cfg_encap_proto = IPPROTO_IPV6; - - /* RFC 6040 4.2: - * on decap, if outer encountered congestion (CE == 0x3), - * but inner cannot encode ECN (NoECT == 0x0), then drop packet. - */ - if (((cfg_dsfield_outer & 0x3) == 0x3) && - ((cfg_dsfield_inner & 0x3) == 0x0)) - cfg_expect_failure = true; -} - -static void print_opts(void) -{ - if (cfg_l3_inner == PF_INET6) { - util_printaddr("inner.dest6", (void *) &in_daddr6); - util_printaddr("inner.source6", (void *) &in_saddr6); - } else { - util_printaddr("inner.dest4", (void *) &in_daddr4); - util_printaddr("inner.source4", (void *) &in_saddr4); - } - - if (!cfg_l3_outer) - return; - - fprintf(stderr, "encap proto: %u\n", cfg_encap_proto); - - if (cfg_l3_outer == PF_INET6) { - util_printaddr("outer.dest6", (void *) &out_daddr6); - util_printaddr("outer.source6", (void *) &out_saddr6); - } else { - util_printaddr("outer.dest4", (void *) &out_daddr4); - util_printaddr("outer.source4", (void *) &out_saddr4); - } - - if (!cfg_l3_extra) - return; - - if (cfg_l3_outer == PF_INET6) { - util_printaddr("extra.dest6", (void *) &extra_daddr6); - util_printaddr("extra.source6", (void *) &extra_saddr6); - } else { - util_printaddr("extra.dest4", (void *) &extra_daddr4); - util_printaddr("extra.source4", (void *) &extra_saddr4); - } - -} - -int main(int argc, char **argv) -{ - parse_opts(argc, argv); - print_opts(); - return do_main(); -} diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh deleted file mode 100755 index 4b298863797a..000000000000 --- a/tools/testing/selftests/bpf/test_flow_dissector.sh +++ /dev/null @@ -1,178 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# Load BPF flow dissector and verify it correctly dissects traffic - -BPF_FILE="bpf_flow.bpf.o" -export TESTNAME=test_flow_dissector -unmount=0 - -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - -msg="skip all tests:" -if [ $UID != 0 ]; then - echo $msg please run this as root >&2 - exit $ksft_skip -fi - -# This test needs to be run in a network namespace with in_netns.sh. Check if -# this is the case and run it with in_netns.sh if it is being run in the root -# namespace. -if [[ -z $(ip netns identify $$) ]]; then - err=0 - if bpftool="$(which bpftool)"; then - echo "Testing global flow dissector..." - - $bpftool prog loadall $BPF_FILE /sys/fs/bpf/flow \ - type flow_dissector - - if ! unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Unexpected unsuccessful attach in namespace" >&2 - err=1 - fi - - $bpftool prog attach pinned /sys/fs/bpf/flow/_dissect \ - flow_dissector - - if unshare --net $bpftool prog attach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Unexpected successful attach in namespace" >&2 - err=1 - fi - - if ! 
$bpftool prog detach pinned \ - /sys/fs/bpf/flow/_dissect flow_dissector; then - echo "Failed to detach flow dissector" >&2 - err=1 - fi - - rm -rf /sys/fs/bpf/flow - else - echo "Skipping root flow dissector test, bpftool not found" >&2 - fi - - # Run the rest of the tests in a net namespace. - ../net/in_netns.sh "$0" "$@" - err=$(( $err + $? )) - - if (( $err == 0 )); then - echo "selftests: $TESTNAME [PASS]"; - else - echo "selftests: $TESTNAME [FAILED]"; - fi - - exit $err -fi - -# Determine selftest success via shell exit code -exit_handler() -{ - set +e - - # Cleanup - tc filter del dev lo ingress pref 1337 2> /dev/null - tc qdisc del dev lo ingress 2> /dev/null - ./flow_dissector_load -d 2> /dev/null - if [ $unmount -ne 0 ]; then - umount bpffs 2> /dev/null - fi -} - -# Exit script immediately (well catched by trap handler) if any -# program/thing exits with a non-zero status. -set -e - -# (Use 'trap -l' to list meaning of numbers) -trap exit_handler 0 2 3 6 9 - -# Mount BPF file system -if /bin/mount | grep /sys/fs/bpf > /dev/null; then - echo "bpffs already mounted" -else - echo "bpffs not mounted. Mounting..." - unmount=1 - /bin/mount bpffs /sys/fs/bpf -t bpf -fi - -# Attach BPF program -./flow_dissector_load -p $BPF_FILE -s _dissect - -# Setup -tc qdisc add dev lo ingress -echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter -echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter -echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter - -echo "Testing IPv4..." -# Drops all IP/UDP packets coming from port 9 -tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ - udp src_port 9 action drop - -# Send 10 IPv4/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 4 -f 8 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -f 9 -F -# Send 10 IPv4/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 4 -f 10 - -echo "Testing IPv4 from 127.0.0.127 (fallback to generic dissector)..." -# Send 10 IPv4/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 8 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 9 -F -# Send 10 IPv4/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 4 -S 127.0.0.127 -f 10 - -echo "Testing IPIP..." -# Send 10 IPv4/IPv4/UDP packets from port 8. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 8 -# Send 10 IPv4/IPv4/UDP packets from port 9. Filter should drop all. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 9 -F -# Send 10 IPv4/IPv4/UDP packets from port 10. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e bare -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 10 - -echo "Testing IPv4 + GRE..." -# Send 10 IPv4/GRE/IPv4/UDP packets from port 8. Filter should not drop any. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 8 -# Send 10 IPv4/GRE/IPv4/UDP packets from port 9. Filter should drop all. -./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 9 -F -# Send 10 IPv4/GRE/IPv4/UDP packets from port 10. Filter should not drop any. 
-./with_addr.sh ./with_tunnels.sh ./test_flow_dissector -o 4 -e gre -i 4 \ - -D 192.168.0.1 -S 1.1.1.1 -f 10 - -tc filter del dev lo ingress pref 1337 - -echo "Testing port range..." -# Drops all IP/UDP packets coming from port 8-10 -tc filter add dev lo parent ffff: protocol ip pref 1337 flower ip_proto \ - udp src_port 8-10 action drop - -# Send 10 IPv4/UDP packets from port 7. Filter should not drop any. -./test_flow_dissector -i 4 -f 7 -# Send 10 IPv4/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 4 -f 9 -F -# Send 10 IPv4/UDP packets from port 11. Filter should not drop any. -./test_flow_dissector -i 4 -f 11 - -tc filter del dev lo ingress pref 1337 - -echo "Testing IPv6..." -# Drops all IPv6/UDP packets coming from port 9 -tc filter add dev lo parent ffff: protocol ipv6 pref 1337 flower ip_proto \ - udp src_port 9 action drop - -# Send 10 IPv6/UDP packets from port 8. Filter should not drop any. -./test_flow_dissector -i 6 -f 8 -# Send 10 IPv6/UDP packets from port 9. Filter should drop all. -./test_flow_dissector -i 6 -f 9 -F -# Send 10 IPv6/UDP packets from port 10. Filter should not drop any. -./test_flow_dissector -i 6 -f 10 - -exit 0 diff --git a/tools/testing/selftests/bpf/bpf_testmod/.gitignore b/tools/testing/selftests/bpf/test_kmods/.gitignore index ded513777281..ded513777281 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/.gitignore +++ b/tools/testing/selftests/bpf/test_kmods/.gitignore diff --git a/tools/testing/selftests/bpf/test_kmods/Makefile b/tools/testing/selftests/bpf/test_kmods/Makefile new file mode 100644 index 000000000000..d4e50c4509c9 --- /dev/null +++ b/tools/testing/selftests/bpf/test_kmods/Makefile @@ -0,0 +1,21 @@ +TEST_KMOD_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +KDIR ?= $(abspath $(TEST_KMOD_DIR)/../../../../..) 
+ +ifeq ($(V),1) +Q = +else +Q = @ +endif + +MODULES = bpf_testmod.ko bpf_test_no_cfi.ko bpf_test_modorder_x.ko \ + bpf_test_modorder_y.ko + +$(foreach m,$(MODULES),$(eval obj-m += $(m:.ko=.o))) + +CFLAGS_bpf_testmod.o = -I$(src) + +all: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) modules + +clean: + $(Q)$(MAKE) -C $(KDIR) M=$(TEST_KMOD_DIR) clean diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c index 0cc747fa912f..0cc747fa912f 100644 --- a/tools/testing/selftests/bpf/bpf_test_modorder_x/bpf_test_modorder_x.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_x.c diff --git a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c index c627ee085d13..c627ee085d13 100644 --- a/tools/testing/selftests/bpf/bpf_test_modorder_y/bpf_test_modorder_y.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_modorder_y.c diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c index 948eb3962732..948eb3962732 100644 --- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_no_cfi.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h index aeef86b3da74..aeef86b3da74 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod-events.h diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index cc9dde507aba..cc9dde507aba 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h index 356803d1c10e..356803d1c10e 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.h diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h index b58817938deb..b58817938deb 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 6088d8222d59..c9e745d49493 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -1282,6 +1282,21 @@ void crash_handler(int signum) backtrace_symbols_fd(bt, sz, STDERR_FILENO); } +void hexdump(const char *prefix, const void *buf, size_t len) +{ + for (int i = 0; i < len; i++) { + if (!(i % 16)) { + if (i) + fprintf(stdout, "\n"); + fprintf(stdout, "%s", prefix); + } + if (i && !(i % 8) && (i % 16)) + fprintf(stdout, "\t"); + fprintf(stdout, "%02X ", ((uint8_t *)(buf))[i]); + } + fprintf(stdout, "\n"); +} + static void sigint_handler(int signum) { int i; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 74de33ae37e5..404d0d4915d5 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -185,6 +185,7 @@ void test__end_subtest(void); void test__skip(void); void test__fail(void); int 
test__join_cgroup(const char *path); +void hexdump(const char *prefix, const void *buf, size_t len); #define PRINT_FAIL(format...) \ ({ \ @@ -344,6 +345,20 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_MEMEQ(actual, expected, len, name) ({ \ + static int duration = 0; \ + const void *__act = actual; \ + const void *__exp = expected; \ + int __len = len; \ + bool ___ok = memcmp(__act, __exp, __len) == 0; \ + CHECK(!___ok, (name), "unexpected memory mismatch\n"); \ + fprintf(stdout, "actual:\n"); \ + hexdump("\t", __act, __len); \ + fprintf(stdout, "expected:\n"); \ + hexdump("\t", __exp, __len); \ + ___ok; \ +}) + #define ASSERT_OK(res, name) ({ \ static int duration = 0; \ long long ___res = (res); \ diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index 7989ec608454..cb55a908bb0d 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -305,6 +305,7 @@ else client_connect verify_data server_listen + wait_for_port ${port} ${netcat_opt} fi # serverside, use BPF for decap diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh deleted file mode 100755 index 2740322c1878..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_meta.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/sh - -BPF_FILE="test_xdp_meta.bpf.o" -# Kselftest framework requirement - SKIP code is 4. -readonly KSFT_SKIP=4 -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" - -cleanup() -{ - if [ "$?" = "0" ]; then - echo "selftests: test_xdp_meta [PASS]"; - else - echo "selftests: test_xdp_meta [FAILED]"; - fi - - set +e - ip link del veth1 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null -} - -ip link set dev lo xdp off 2>/dev/null > /dev/null -if [ $? 
-ne 0 ];then - echo "selftests: [SKIP] Could not run test without the ip xdp support" - exit $KSFT_SKIP -fi -set -e - -ip netns add ${NS1} -ip netns add ${NS2} - -trap cleanup 0 2 3 6 9 - -ip link add veth1 type veth peer name veth2 - -ip link set veth1 netns ${NS1} -ip link set veth2 netns ${NS2} - -ip netns exec ${NS1} ip addr add 10.1.1.11/24 dev veth1 -ip netns exec ${NS2} ip addr add 10.1.1.22/24 dev veth2 - -ip netns exec ${NS1} tc qdisc add dev veth1 clsact -ip netns exec ${NS2} tc qdisc add dev veth2 clsact - -ip netns exec ${NS1} tc filter add dev veth1 ingress bpf da obj ${BPF_FILE} sec t -ip netns exec ${NS2} tc filter add dev veth2 ingress bpf da obj ${BPF_FILE} sec t - -ip netns exec ${NS1} ip link set dev veth1 xdp obj ${BPF_FILE} sec x -ip netns exec ${NS2} ip link set dev veth2 xdp obj ${BPF_FILE} sec x - -ip netns exec ${NS1} ip link set dev veth1 up -ip netns exec ${NS2} ip link set dev veth2 up - -ip netns exec ${NS1} ping -c 1 10.1.1.22 -ip netns exec ${NS2} ping -c 1 10.1.1.11 - -exit 0 diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh deleted file mode 100755 index 0746a4fde9d3..000000000000 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash -# Create 2 namespaces with two veth peers, and -# forward packets in-between using generic XDP -# -# NS1(veth11) NS2(veth22) -# | | -# | | -# (veth1, ------ (veth2, -# id:111) id:222) -# | xdp forwarding | -# ------------------ - -readonly NS1="ns1-$(mktemp -u XXXXXX)" -readonly NS2="ns2-$(mktemp -u XXXXXX)" -ret=0 - -setup() -{ - - local xdpmode=$1 - - ip netns add ${NS1} - ip netns add ${NS2} - - ip link add veth1 index 111 type veth peer name veth11 netns ${NS1} - ip link add veth2 index 222 type veth peer name veth22 netns ${NS2} - - ip link set veth1 up - ip link set veth2 up - ip -n ${NS1} link set dev veth11 up - ip -n ${NS2} link set dev veth22 up - - ip -n ${NS1} addr add 10.1.1.11/24 dev veth11 - ip -n ${NS2} addr add 10.1.1.22/24 dev veth22 -} - -cleanup() -{ - ip link del veth1 2> /dev/null - ip link del veth2 2> /dev/null - ip netns del ${NS1} 2> /dev/null - ip netns del ${NS2} 2> /dev/null -} - -test_xdp_redirect() -{ - local xdpmode=$1 - - setup - - ip link set dev veth1 $xdpmode off &> /dev/null - if [ $? 
-ne 0 ];then - echo "selftests: test_xdp_redirect $xdpmode [SKIP]" - return 0 - fi - - ip -n ${NS1} link set veth11 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null - ip -n ${NS2} link set veth22 $xdpmode obj xdp_dummy.bpf.o sec xdp &> /dev/null - ip link set dev veth1 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_222 &> /dev/null - ip link set dev veth2 $xdpmode obj test_xdp_redirect.bpf.o sec redirect_to_111 &> /dev/null - - if ip netns exec ${NS1} ping -c 1 10.1.1.22 &> /dev/null && - ip netns exec ${NS2} ping -c 1 10.1.1.11 &> /dev/null; then - echo "selftests: test_xdp_redirect $xdpmode [PASS]"; - else - ret=1 - echo "selftests: test_xdp_redirect $xdpmode [FAILED]"; - fi - - cleanup -} - -set -e -trap cleanup 2 3 6 9 - -test_xdp_redirect xdpgeneric -test_xdp_redirect xdpdrv - -exit $ret diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 7afc2619ab14..18596ae0b0c1 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -2252,7 +2252,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_48b = { 7 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8", + .errstr_unpriv = "invalid read from stack R2 off -8+0 size 8", .result_unpriv = REJECT, /* in privileged mode reads from uninitialized stack locations are permitted */ .result = ACCEPT, diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index f420c0312aa0..4b39f8472f9b 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -373,7 +373,7 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_kptr = { 1 }, .result = REJECT, - .errstr = "Unreleased reference id=5 alloc_insn=20", + .errstr = "Unreleased reference id=4 alloc_insn=20", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_acquire", 15 }, } diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index e12ef953fba8..06af5029885b 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -21,11 +21,20 @@ #include <gelf.h> #include <float.h> #include <math.h> +#include <limits.h> #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif +#ifndef max +#define max(a, b) ((a) > (b) ? (a) : (b)) +#endif + +#ifndef min +#define min(a, b) ((a) < (b) ? 
(a) : (b)) +#endif + enum stat_id { VERDICT, DURATION, @@ -34,6 +43,11 @@ enum stat_id { PEAK_STATES, MAX_STATES_PER_INSN, MARK_READ_MAX_LEN, + SIZE, + JITED_SIZE, + STACK, + PROG_TYPE, + ATTACH_TYPE, FILE_NAME, PROG_NAME, @@ -203,7 +217,8 @@ const char argp_program_doc[] = "\n" "USAGE: veristat <obj-file> [<obj-file>...]\n" " OR: veristat -C <baseline.csv> <comparison.csv>\n" -" OR: veristat -R <results.csv>\n"; +" OR: veristat -R <results.csv>\n" +" OR: veristat -vl2 <to_analyze.bpf.o>\n"; enum { OPT_LOG_FIXED = 1000, @@ -215,7 +230,7 @@ static const struct argp_option opts[] = { { "version", 'V', NULL, 0, "Print version" }, { "verbose", 'v', NULL, 0, "Verbose mode" }, { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" }, - { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, + { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode, 2 for full verification log)" }, { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" }, { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" }, { "top-n", 'n', "N", 0, "Emit only up to first N results." }, @@ -640,19 +655,21 @@ cleanup: } static const struct stat_specs default_output_spec = { - .spec_cnt = 7, + .spec_cnt = 8, .ids = { FILE_NAME, PROG_NAME, VERDICT, DURATION, - TOTAL_INSNS, TOTAL_STATES, PEAK_STATES, + TOTAL_INSNS, TOTAL_STATES, SIZE, JITED_SIZE }, }; static const struct stat_specs default_csv_output_spec = { - .spec_cnt = 9, + .spec_cnt = 14, .ids = { FILE_NAME, PROG_NAME, VERDICT, DURATION, TOTAL_INSNS, TOTAL_STATES, PEAK_STATES, MAX_STATES_PER_INSN, MARK_READ_MAX_LEN, + SIZE, JITED_SIZE, PROG_TYPE, ATTACH_TYPE, + STACK, }, }; @@ -688,6 +705,11 @@ static struct stat_def { [PEAK_STATES] = { "Peak states", {"peak_states"}, }, [MAX_STATES_PER_INSN] = { "Max states per insn", {"max_states_per_insn"}, }, [MARK_READ_MAX_LEN] = { "Max mark read length", {"max_mark_read_len", "mark_read"}, }, + [SIZE] = { "Program size", {"prog_size"}, }, + [JITED_SIZE] = { "Jited size", {"prog_size_jited"}, }, + [STACK] = {"Stack depth", {"stack_depth", "stack"}, }, + [PROG_TYPE] = { "Program type", {"prog_type"}, }, + [ATTACH_TYPE] = { "Attach type", {"attach_type", }, }, }; static bool parse_stat_id_var(const char *name, size_t len, int *id, @@ -835,7 +857,8 @@ static char verif_log_buf[64 * 1024]; static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats *s) { const char *cur; - int pos, lines; + int pos, lines, sub_stack, cnt = 0; + char *state = NULL, *token, stack[512]; buf[buf_sz - 1] = '\0'; @@ -853,15 +876,22 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats * if (1 == sscanf(cur, "verification time %ld usec\n", &s->stats[DURATION])) continue; - if (6 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld", + if (5 == sscanf(cur, "processed %ld insns (limit %*d) max_states_per_insn %ld total_states %ld peak_states %ld mark_read %ld", &s->stats[TOTAL_INSNS], &s->stats[MAX_STATES_PER_INSN], &s->stats[TOTAL_STATES], &s->stats[PEAK_STATES], &s->stats[MARK_READ_MAX_LEN])) continue; - } + if (1 == sscanf(cur, "stack depth %511s", stack)) + continue; + } + while ((token = strtok_r(cnt++ ? 
NULL : stack, "+", &state))) { + if (sscanf(token, "%d", &sub_stack) == 0) + break; + s->stats[STACK] += sub_stack; + } return 0; } @@ -884,7 +914,7 @@ static int line_cnt_cmp(const void *a, const void *b) const struct line_cnt *b_cnt = (const struct line_cnt *)b; if (a_cnt->cnt != b_cnt->cnt) - return a_cnt->cnt < b_cnt->cnt ? -1 : 1; + return a_cnt->cnt > b_cnt->cnt ? -1 : 1; return strcmp(a_cnt->line, b_cnt->line); } @@ -1032,6 +1062,41 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name, return -ESRCH; } +/* Make sure only target program is referenced from struct_ops map, + * otherwise libbpf would automatically set autocreate for all + * referenced programs. + * See libbpf.c:bpf_object_adjust_struct_ops_autoload. + */ +static void mask_unrelated_struct_ops_progs(struct bpf_object *obj, + struct bpf_map *map, + struct bpf_program *prog) +{ + struct btf *btf = bpf_object__btf(obj); + const struct btf_type *t, *mt; + struct btf_member *m; + int i, moff; + size_t data_sz, ptr_sz = sizeof(void *); + void *data; + + t = btf__type_by_id(btf, bpf_map__btf_value_type_id(map)); + if (!btf_is_struct(t)) + return; + + data = bpf_map__initial_value(map, &data_sz); + for (i = 0; i < btf_vlen(t); i++) { + m = &btf_members(t)[i]; + mt = btf__type_by_id(btf, m->type); + if (!btf_is_ptr(mt)) + continue; + moff = m->offset / 8; + if (moff + ptr_sz > data_sz) + continue; + if (memcmp(data + moff, &prog, ptr_sz) == 0) + continue; + memset(data + moff, 0, ptr_sz); + } +} + static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename) { struct bpf_map *map; @@ -1047,6 +1112,9 @@ static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const ch case BPF_MAP_TYPE_INODE_STORAGE: case BPF_MAP_TYPE_CGROUP_STORAGE: break; + case BPF_MAP_TYPE_STRUCT_OPS: + mask_unrelated_struct_ops_progs(obj, map, prog); + break; default: if (bpf_map__max_entries(map) == 0) bpf_map__set_max_entries(map, 1); @@ -1146,8 +1214,11 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf char *buf; int buf_sz, log_level; struct verif_stats *stats; + struct bpf_prog_info info; + __u32 info_len = sizeof(info); int err = 0; void *tmp; + int fd; if (!should_process_file_prog(base_filename, bpf_program__name(prog))) { env.progs_skipped++; @@ -1196,6 +1267,15 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf stats->file_name = strdup(base_filename); stats->prog_name = strdup(bpf_program__name(prog)); stats->stats[VERDICT] = err == 0; /* 1 - success, 0 - failure */ + stats->stats[SIZE] = bpf_program__insn_cnt(prog); + stats->stats[PROG_TYPE] = bpf_program__type(prog); + stats->stats[ATTACH_TYPE] = bpf_program__expected_attach_type(prog); + + memset(&info, 0, info_len); + fd = bpf_program__fd(prog); + if (fd > 0 && bpf_prog_get_info_by_fd(fd, &info, &info_len) == 0) + stats->stats[JITED_SIZE] = info.jited_prog_len; + parse_verif_log(buf, buf_sz, stats); if (env.verbose) { @@ -1309,6 +1389,11 @@ static int cmp_stat(const struct verif_stats *s1, const struct verif_stats *s2, case PROG_NAME: cmp = strcmp(s1->prog_name, s2->prog_name); break; + case ATTACH_TYPE: + case PROG_TYPE: + case SIZE: + case JITED_SIZE: + case STACK: case VERDICT: case DURATION: case TOTAL_INSNS: @@ -1523,12 +1608,27 @@ static void prepare_value(const struct verif_stats *s, enum stat_id id, else *str = s->stats[VERDICT] ? 
"success" : "failure"; break; + case ATTACH_TYPE: + if (!s) + *str = "N/A"; + else + *str = libbpf_bpf_attach_type_str(s->stats[ATTACH_TYPE]) ?: "N/A"; + break; + case PROG_TYPE: + if (!s) + *str = "N/A"; + else + *str = libbpf_bpf_prog_type_str(s->stats[PROG_TYPE]) ?: "N/A"; + break; case DURATION: case TOTAL_INSNS: case TOTAL_STATES: case PEAK_STATES: case MAX_STATES_PER_INSN: case MARK_READ_MAX_LEN: + case STACK: + case SIZE: + case JITED_SIZE: *val = s ? s->stats[id] : 0; break; default: @@ -1612,7 +1712,10 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats case TOTAL_STATES: case PEAK_STATES: case MAX_STATES_PER_INSN: - case MARK_READ_MAX_LEN: { + case MARK_READ_MAX_LEN: + case SIZE: + case JITED_SIZE: + case STACK: { long val; int err, n; @@ -1625,6 +1728,42 @@ static int parse_stat_value(const char *str, enum stat_id id, struct verif_stats st->stats[id] = val; break; } + case PROG_TYPE: { + enum bpf_prog_type prog_type = 0; + const char *type; + + while ((type = libbpf_bpf_prog_type_str(prog_type))) { + if (strcmp(type, str) == 0) { + st->stats[id] = prog_type; + break; + } + prog_type++; + } + + if (!type) { + fprintf(stderr, "Unrecognized prog type %s\n", str); + return -EINVAL; + } + break; + } + case ATTACH_TYPE: { + enum bpf_attach_type attach_type = 0; + const char *type; + + while ((type = libbpf_bpf_attach_type_str(attach_type))) { + if (strcmp(type, str) == 0) { + st->stats[id] = attach_type; + break; + } + attach_type++; + } + + if (!type) { + fprintf(stderr, "Unrecognized attach type %s\n", str); + return -EINVAL; + } + break; + } default: fprintf(stderr, "Unrecognized stat #%d\n", id); return -EINVAL; diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 6f9956eed797..6f7b15d6c6ed 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -27,7 +27,7 @@ #include <linux/errqueue.h> #include <linux/if_link.h> #include <linux/net_tstamp.h> -#include <linux/udp.h> +#include <netinet/udp.h> #include <linux/sockios.h> #include <linux/if_xdp.h> #include <sys/mman.h> @@ -79,7 +79,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id) .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, - .flags = XSK_UMEM__DEFAULT_FLAGS, + .flags = XDP_UMEM_TX_METADATA_LEN, .tx_metadata_len = sizeof(struct xsk_tx_metadata), }; __u32 idx = 0; @@ -551,6 +551,7 @@ static void hwtstamp_enable(const char *ifname) { struct hwtstamp_config cfg = { .rx_filter = HWTSTAMP_FILTER_ALL, + .tx_type = HWTSTAMP_TX_ON, }; hwtstamp_ioctl(SIOCGHWTSTAMP, ifname, &saved_hwtstamp_cfg); |