author     Linus Torvalds <torvalds@linux-foundation.org>   2024-09-21 09:27:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-09-21 09:27:50 -0700
commit     440b65232829fad69947b8de983c13a525cc8871 (patch)
tree       3cab57fca48b43ba0e11804683b33b71743494c6 /samples
parent     1ec6d097897a35dfb55c4c31fc8633cf5be46497 (diff)
parent     5277d130947ba8c0d54c16eed89eb97f0b6d2e5a (diff)
Merge tag 'bpf-next-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
- Introduce '__attribute__((bpf_fastcall))' for helpers and kfuncs with
corresponding support in LLVM.
It is similar to the existing 'no_caller_saved_registers' attribute in
GCC/LLVM, with a provision for backward compatibility. It allows
compilers to generate more efficient BPF code, assuming the verifier or
JITs will inline or partially inline a helper/kfunc carrying such an
attribute. bpf_cast_to_kern_ctx, bpf_rdonly_cast and
bpf_get_smp_processor_id are the first set of such helpers (a
declaration sketch follows this list).
- Harden and extend ELF build ID parsing logic.
When called from a sleepable context, the relevant parts of the ELF
file will be read to find and fetch .note.gnu.build-id information.
Also harden the logic to avoid TOCTOU, overflow and out-of-bounds
problems.
- Improvements and fixes for sched-ext:
- Allow passing BPF iterators as kfunc arguments
- Make the pointer returned from iter_next method trusted
- Fix x86 JIT convergence issue due to growing/shrinking conditional
jumps in variable length encoding
- BPF_LSM related:
- Introduce a few VFS kfuncs and consolidate them in
fs/bpf_fs_kfuncs.c
- Enforce the correct range of return values from certain LSM hooks
(see the hook sketch after this list)
- Disallow attaching to other LSM hooks
- Prerequisite work for upcoming Qdisc in BPF:
- Allow kptrs in program provided structs
- Support for gen_epilogue in verifier_ops
- Important fixes:
- Fix uprobe multi pid filter check
- Fix the bpf_strtol and bpf_strtoul helpers (usage sketched after
this list)
- Track equal scalars history on per-instruction level
- Fix tailcall hierarchy on x86 and arm64
- Fix signed division overflow to prevent INT_MIN/-1 trap on x86
- Fix kernel stack retrieval in BPF progs attached to tracepoint:syscall
- Selftests:
- Add uprobe bench/stress tool
- Generate file dependencies to drastically improve re-build time
- Match JIT-ed and BPF asm with __xlated/__jited keywords
- Convert older tests to test_progs framework
- Add support for RISC-V
- A few fixes for BPF programs compiled with the GCC-BPF backend
(support for GCC-BPF in BPF CI is ongoing in parallel)
- Add traffic monitor
- Enable cross compile and musl libc
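
For illustration, a minimal sketch of a bpf_fastcall-annotated kfunc
declaration in a BPF C program; the choice of kfunc and the use of the raw
attribute rather than a wrapper macro are assumptions, not taken from this
pull:

    /* Sketch: a bpf_fastcall-aware clang may assume this kfunc clobbers no
     * caller-saved registers and so can skip spills/fills around the call;
     * the verifier or JIT is then expected to inline the call.
     */
    extern void *bpf_cast_to_kern_ctx(void *obj)
            __attribute__((bpf_fastcall)) __ksym;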
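
A minimal BPF LSM sketch (not from the series) of the kind of hook whose
return value is now range-checked; the hook and program body are
illustrative assumptions:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* file_open may return 0 (allow) or a negative errno (deny);
     * the new checks reject return values outside that range.
     */
    SEC("lsm/file_open")
    int BPF_PROG(allow_all_opens, struct file *file)
    {
            return 0; /* e.g. -EPERM here would deny the open */
    }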
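
A rough usage sketch for the fixed bpf_strtol helper; the section name,
buffer and program are made up for illustration. The important detail is
that the result pointer must point at writable memory (stack or .data),
not a read-only map:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    SEC("tp/syscalls/sys_enter_write")
    int parse_number(void *ctx)
    {
            const char buf[] = "1024";
            long val = 0;   /* result lands in a writable stack slot */
            long n;

            n = bpf_strtol(buf, sizeof(buf) - 1, 0, &val);
            if (n < 0)      /* after the fixes, *val is zeroed on error */
                    return 0;
            bpf_printk("parsed %ld", val);
            return 0;
    }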
* tag 'bpf-next-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (260 commits)
btf: require pahole 1.21+ for DEBUG_INFO_BTF with default DWARF version
btf: move pahole check in scripts/link-vmlinux.sh to lib/Kconfig.debug
btf: remove redundant CONFIG_BPF test in scripts/link-vmlinux.sh
bpf: Call the missed kfree() when there is no special field in btf
bpf: Call the missed btf_record_free() when map creation fails
selftests/bpf: Add a test case to write mtu result into .rodata
selftests/bpf: Add a test case to write strtol result into .rodata
selftests/bpf: Rename ARG_PTR_TO_LONG test description
selftests/bpf: Fix ARG_PTR_TO_LONG {half-,}uninitialized test
bpf: Zero former ARG_PTR_TO_{LONG,INT} args in case of error
bpf: Improve check_raw_mode_ok test for MEM_UNINIT-tagged types
bpf: Fix helper writes to read-only maps
bpf: Remove truncation test in bpf_strtol and bpf_strtoul helpers
bpf: Fix bpf_strtol and bpf_strtoul helpers for 32bit
selftests/bpf: Add tests for sdiv/smod overflow cases
bpf: Fix a sdiv overflow issue
libbpf: Add bpf_object__token_fd accessor
docs/bpf: Add missing BPF program types to docs
docs/bpf: Add constant values for linkages
bpf: Use fake pt_regs when doing bpf syscall tracepoint tracing
...
Diffstat (limited to 'samples')
-rw-r--r--  samples/bpf/Makefile          9
-rw-r--r--  samples/bpf/tracex2.bpf.c    99
-rw-r--r--  samples/bpf/tracex2_user.c  187
-rw-r--r--  samples/bpf/tracex4.bpf.c     4
4 files changed, 7 insertions, 292 deletions
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 3e003dd6bea0..7afe040cf43b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -13,7 +13,6 @@ tprogs-y += sockex1
 tprogs-y += sockex2
 tprogs-y += sockex3
 tprogs-y += tracex1
-tprogs-y += tracex2
 tprogs-y += tracex3
 tprogs-y += tracex4
 tprogs-y += tracex5
@@ -63,7 +62,6 @@ sockex1-objs := sockex1_user.o
 sockex2-objs := sockex2_user.o
 sockex3-objs := sockex3_user.o
 tracex1-objs := tracex1_user.o $(TRACE_HELPERS)
-tracex2-objs := tracex2_user.o
 tracex3-objs := tracex3_user.o
 tracex4-objs := tracex4_user.o
 tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
@@ -105,7 +103,6 @@ always-y += sockex1_kern.o
 always-y += sockex2_kern.o
 always-y += sockex3_kern.o
 always-y += tracex1.bpf.o
-always-y += tracex2.bpf.o
 always-y += tracex3.bpf.o
 always-y += tracex4.bpf.o
 always-y += tracex5.bpf.o
@@ -169,6 +166,10 @@ BPF_EXTRA_CFLAGS += -I$(srctree)/arch/mips/include/asm/mach-generic
 endif
 endif
 
+ifeq ($(ARCH), x86)
+BPF_EXTRA_CFLAGS += -fcf-protection
+endif
+
 TPROGS_CFLAGS += -Wall -O2
 TPROGS_CFLAGS += -Wmissing-prototypes
 TPROGS_CFLAGS += -Wstrict-prototypes
@@ -405,7 +406,7 @@ $(obj)/%.o: $(src)/%.c
 		-Wno-gnu-variable-sized-type-not-at-end \
 		-Wno-address-of-packed-member -Wno-tautological-compare \
 		-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
-		-fno-asynchronous-unwind-tables -fcf-protection \
+		-fno-asynchronous-unwind-tables \
 		-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
 		-O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
 		$(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
diff --git a/samples/bpf/tracex2.bpf.c b/samples/bpf/tracex2.bpf.c
deleted file mode 100644
index 0a5c75b367be..000000000000
--- a/samples/bpf/tracex2.bpf.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- */
-#include "vmlinux.h"
-#include <linux/version.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_tracing.h>
-#include <bpf/bpf_core_read.h>
-
-struct {
-	__uint(type, BPF_MAP_TYPE_HASH);
-	__type(key, long);
-	__type(value, long);
-	__uint(max_entries, 1024);
-} my_map SEC(".maps");
-
-/* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
- * example will no longer be meaningful
- */
-SEC("kprobe/kfree_skb_reason")
-int bpf_prog2(struct pt_regs *ctx)
-{
-	long loc = 0;
-	long init_val = 1;
-	long *value;
-
-	/* read ip of kfree_skb_reason caller.
-	 * non-portable version of __builtin_return_address(0)
-	 */
-	BPF_KPROBE_READ_RET_IP(loc, ctx);
-
-	value = bpf_map_lookup_elem(&my_map, &loc);
-	if (value)
-		*value += 1;
-	else
-		bpf_map_update_elem(&my_map, &loc, &init_val, BPF_ANY);
-	return 0;
-}
-
-static unsigned int log2(unsigned int v)
-{
-	unsigned int r;
-	unsigned int shift;
-
-	r = (v > 0xFFFF) << 4; v >>= r;
-	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
-	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
-	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
-	r |= (v >> 1);
-	return r;
-}
-
-static unsigned int log2l(unsigned long v)
-{
-	unsigned int hi = v >> 32;
-	if (hi)
-		return log2(hi) + 32;
-	else
-		return log2(v);
-}
-
-struct hist_key {
-	char comm[16];
-	u64 pid_tgid;
-	u64 uid_gid;
-	u64 index;
-};
-
-struct {
-	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
-	__uint(key_size, sizeof(struct hist_key));
-	__uint(value_size, sizeof(long));
-	__uint(max_entries, 1024);
-} my_hist_map SEC(".maps");
-
-SEC("ksyscall/write")
-int BPF_KSYSCALL(bpf_prog3, unsigned int fd, const char *buf, size_t count)
-{
-	long init_val = 1;
-	long *value;
-	struct hist_key key;
-
-	key.index = log2l(count);
-	key.pid_tgid = bpf_get_current_pid_tgid();
-	key.uid_gid = bpf_get_current_uid_gid();
-	bpf_get_current_comm(&key.comm, sizeof(key.comm));
-
-	value = bpf_map_lookup_elem(&my_hist_map, &key);
-	if (value)
-		__sync_fetch_and_add(value, 1);
-	else
-		bpf_map_update_elem(&my_hist_map, &key, &init_val, BPF_ANY);
-	return 0;
-}
-char _license[] SEC("license") = "GPL";
-u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex2_user.c b/samples/bpf/tracex2_user.c
deleted file mode 100644
index 2131f1648cf1..000000000000
--- a/samples/bpf/tracex2_user.c
+++ /dev/null
@@ -1,187 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <string.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-#include "bpf_util.h"
-
-#define MAX_INDEX	64
-#define MAX_STARS	38
-
-/* my_map, my_hist_map */
-static int map_fd[2];
-
-static void stars(char *str, long val, long max, int width)
-{
-	int i;
-
-	for (i = 0; i < (width * val / max) - 1 && i < width - 1; i++)
-		str[i] = '*';
-	if (val > max)
-		str[i - 1] = '+';
-	str[i] = '\0';
-}
-
-struct task {
-	char comm[16];
-	__u64 pid_tgid;
-	__u64 uid_gid;
-};
-
-struct hist_key {
-	struct task t;
-	__u32 index;
-};
-
-#define SIZE sizeof(struct task)
-
-static void print_hist_for_pid(int fd, void *task)
-{
-	unsigned int nr_cpus = bpf_num_possible_cpus();
-	struct hist_key key = {}, next_key;
-	long values[nr_cpus];
-	char starstr[MAX_STARS];
-	long value;
-	long data[MAX_INDEX] = {};
-	int max_ind = -1;
-	long max_value = 0;
-	int i, ind;
-
-	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
-		if (memcmp(&next_key, task, SIZE)) {
-			key = next_key;
-			continue;
-		}
-		bpf_map_lookup_elem(fd, &next_key, values);
-		value = 0;
-		for (i = 0; i < nr_cpus; i++)
-			value += values[i];
-		ind = next_key.index;
-		data[ind] = value;
-		if (value && ind > max_ind)
-			max_ind = ind;
-		if (value > max_value)
-			max_value = value;
-		key = next_key;
-	}
-
-	printf("   syscall write() stats\n");
-	printf("     byte_size       : count     distribution\n");
-	for (i = 1; i <= max_ind + 1; i++) {
-		stars(starstr, data[i - 1], max_value, MAX_STARS);
-		printf("%8ld -> %-8ld : %-8ld |%-*s|\n",
-		       (1l << i) >> 1, (1l << i) - 1, data[i - 1],
-		       MAX_STARS, starstr);
-	}
-}
-
-static void print_hist(int fd)
-{
-	struct hist_key key = {}, next_key;
-	static struct task tasks[1024];
-	int task_cnt = 0;
-	int i;
-
-	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
-		int found = 0;
-
-		for (i = 0; i < task_cnt; i++)
-			if (memcmp(&tasks[i], &next_key, SIZE) == 0)
-				found = 1;
-		if (!found)
-			memcpy(&tasks[task_cnt++], &next_key, SIZE);
-		key = next_key;
-	}
-
-	for (i = 0; i < task_cnt; i++) {
-		printf("\npid %d cmd %s uid %d\n",
-		       (__u32) tasks[i].pid_tgid,
-		       tasks[i].comm,
-		       (__u32) tasks[i].uid_gid);
-		print_hist_for_pid(fd, &tasks[i]);
-	}
-
-}
-
-static void int_exit(int sig)
-{
-	print_hist(map_fd[1]);
-	exit(0);
-}
-
-int main(int ac, char **argv)
-{
-	long key, next_key, value;
-	struct bpf_link *links[2];
-	struct bpf_program *prog;
-	struct bpf_object *obj;
-	char filename[256];
-	int i, j = 0;
-	FILE *f;
-
-	snprintf(filename, sizeof(filename), "%s.bpf.o", argv[0]);
-	obj = bpf_object__open_file(filename, NULL);
-	if (libbpf_get_error(obj)) {
-		fprintf(stderr, "ERROR: opening BPF object file failed\n");
-		return 0;
-	}
-
-	/* load BPF program */
-	if (bpf_object__load(obj)) {
-		fprintf(stderr, "ERROR: loading BPF object file failed\n");
-		goto cleanup;
-	}
-
-	map_fd[0] = bpf_object__find_map_fd_by_name(obj, "my_map");
-	map_fd[1] = bpf_object__find_map_fd_by_name(obj, "my_hist_map");
-	if (map_fd[0] < 0 || map_fd[1] < 0) {
-		fprintf(stderr, "ERROR: finding a map in obj file failed\n");
-		goto cleanup;
-	}
-
-	signal(SIGINT, int_exit);
-	signal(SIGTERM, int_exit);
-
-	/* start 'ping' in the background to have some kfree_skb_reason
-	 * events */
-	f = popen("ping -4 -c5 localhost", "r");
-	(void) f;
-
-	/* start 'dd' in the background to have plenty of 'write' syscalls */
-	f = popen("dd if=/dev/zero of=/dev/null count=5000000", "r");
-	(void) f;
-
-	bpf_object__for_each_program(prog, obj) {
-		links[j] = bpf_program__attach(prog);
-		if (libbpf_get_error(links[j])) {
-			fprintf(stderr, "ERROR: bpf_program__attach failed\n");
-			links[j] = NULL;
-			goto cleanup;
-		}
-		j++;
-	}
-
-	for (i = 0; i < 5; i++) {
-		key = 0;
-		while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
-			bpf_map_lookup_elem(map_fd[0], &next_key, &value);
-			printf("location 0x%lx count %ld\n", next_key, value);
-			key = next_key;
-		}
-		if (key)
-			printf("\n");
-		sleep(1);
-	}
-	print_hist(map_fd[1]);
-
-cleanup:
-	for (j--; j >= 0; j--)
-		bpf_link__destroy(links[j]);
-
-	bpf_object__close(obj);
-	return 0;
-}
diff --git a/samples/bpf/tracex4.bpf.c b/samples/bpf/tracex4.bpf.c
index ca826750901a..d786492fd926 100644
--- a/samples/bpf/tracex4.bpf.c
+++ b/samples/bpf/tracex4.bpf.c
@@ -33,13 +33,13 @@ int bpf_prog1(struct pt_regs *ctx)
 	return 0;
 }
 
-SEC("kretprobe/kmem_cache_alloc_node")
+SEC("kretprobe/kmem_cache_alloc_node_noprof")
 int bpf_prog2(struct pt_regs *ctx)
 {
 	long ptr = PT_REGS_RC(ctx);
 	long ip = 0;
 
-	/* get ip address of kmem_cache_alloc_node() caller */
+	/* get ip address of kmem_cache_alloc_node_noprof() caller */
 	BPF_KRETPROBE_READ_RET_IP(ip, ctx);
 
 	struct pair v = {