diff options
Diffstat (limited to 'tools/sched_ext')
36 files changed, 4675 insertions, 322 deletions
diff --git a/tools/sched_ext/Kconfig b/tools/sched_ext/Kconfig new file mode 100644 index 000000000000..275bd97ae62b --- /dev/null +++ b/tools/sched_ext/Kconfig @@ -0,0 +1,61 @@ +# sched-ext mandatory options +# +CONFIG_BPF=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_SCHED_CLASS_EXT=y + +# Required by some rust schedulers (e.g. scx_p2dq) +# +CONFIG_KALLSYMS_ALL=y + +# Required on arm64 +# +# CONFIG_DEBUG_INFO_REDUCED is not set + +# LAVD tracks futex to give an additional time slice for futex holder +# (i.e., avoiding lock holder preemption) for better system-wide progress. +# LAVD first tries to use ftrace to trace futex function calls. +# If that is not available, it tries to use a tracepoint. +CONFIG_FUNCTION_TRACER=y + +# Enable scheduling debugging +# +CONFIG_SCHED_DEBUG=y + +# Enable extra scheduling features (for a better code coverage while testing +# the schedulers) +# +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_CORE=y +CONFIG_SCHED_MC=y + +# Enable fully preemptible kernel for a better test coverage of the schedulers +# +# CONFIG_PREEMPT_NONE is not set +# CONFIG_PREEMPT_VOLUNTARY is not set +CONFIG_PREEMPT=y +CONFIG_PREEMPT_DYNAMIC=y + +# Additional debugging information (useful to catch potential locking issues) +CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_PROVE_LOCKING=y + +# Bpftrace headers (for additional debug info) +CONFIG_BPF_EVENTS=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_KPROBES=y +CONFIG_KPROBE_EVENTS=y +CONFIG_UPROBES=y +CONFIG_UPROBE_EVENTS=y +CONFIG_DEBUG_FS=y + +# Enable access to kernel configuration and headers at runtime +CONFIG_IKHEADERS=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKCONFIG=y diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index ca3815e572d8..21554f089692 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -61,8 +61,8 @@ SCXOBJ_DIR := $(OBJ_DIR)/sched_ext BINDIR := 
$(OUTPUT_DIR)/bin BPFOBJ := $(BPFOBJ_DIR)/libbpf.a ifneq ($(CROSS_COMPILE),) -HOST_BUILD_DIR := $(OBJ_DIR)/host -HOST_OUTPUT_DIR := host-tools +HOST_BUILD_DIR := $(OBJ_DIR)/host/obj +HOST_OUTPUT_DIR := $(OBJ_DIR)/host HOST_INCLUDE_DIR := $(HOST_OUTPUT_DIR)/include else HOST_BUILD_DIR := $(OBJ_DIR) @@ -98,7 +98,7 @@ ifneq ($(LLVM),) CFLAGS += -Wno-unused-command-line-argument endif -LDFLAGS = -lelf -lz -lpthread +LDFLAGS += -lelf -lz -lpthread IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \ grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__') @@ -122,6 +122,8 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -I../../include \ $(call get_sys_includes,$(CLANG)) \ -Wall -Wno-compare-distinct-pointer-types \ + -Wno-microsoft-anon-tag \ + -fms-extensions \ -O2 -mcpu=v3 # sort removes libbpf duplicates when not cross-building @@ -133,17 +135,30 @@ $(MAKE_DIRS): $(call msg,MKDIR,,$@) $(Q)mkdir -p $@ +ifneq ($(CROSS_COMPILE),) $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(APIDIR)/linux/bpf.h \ | $(OBJ_DIR)/libbpf - $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \ + $(Q)$(MAKE) $(submake_extras) CROSS_COMPILE=$(CROSS_COMPILE) \ + -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \ EXTRA_CFLAGS='-g -O0 -fPIC' \ + LDFLAGS="$(LDFLAGS)" \ DESTDIR=$(OUTPUT_DIR) prefix= all install_headers +endif + +$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ + $(APIDIR)/linux/bpf.h \ + | $(HOST_BUILD_DIR)/libbpf + $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \ + OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ + ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \ + EXTRA_CFLAGS='-g -O0 -fPIC' \ + DESTDIR=$(HOST_OUTPUT_DIR) prefix= all install_headers $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \ $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \ - ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \ + ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \ EXTRA_CFLAGS='-g -O0' \ 
OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \ LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \ @@ -176,7 +191,7 @@ $(INCLUDE_DIR)/%.bpf.skel.h: $(SCXOBJ_DIR)/%.bpf.o $(INCLUDE_DIR)/vmlinux.h $(BP SCX_COMMON_DEPS := include/scx/common.h include/scx/user_exit_info.h | $(BINDIR) -c-sched-targets = scx_simple scx_qmap scx_central scx_flatcg +c-sched-targets = scx_simple scx_cpu0 scx_qmap scx_central scx_flatcg scx_userland scx_pair scx_sdt $(addprefix $(BINDIR)/,$(c-sched-targets)): \ $(BINDIR)/%: \ @@ -185,7 +200,7 @@ $(addprefix $(BINDIR)/,$(c-sched-targets)): \ $(SCX_COMMON_DEPS) $(eval sched=$(notdir $@)) $(CC) $(CFLAGS) -c $(sched).c -o $(SCXOBJ_DIR)/$(sched).o - $(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(HOST_BPFOBJ) $(LDFLAGS) + $(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(BPFOBJ) $(LDFLAGS) $(c-sched-targets): %: $(BINDIR)/% diff --git a/tools/sched_ext/README.md b/tools/sched_ext/README.md index 16a42e4060f6..6e282bce453c 100644 --- a/tools/sched_ext/README.md +++ b/tools/sched_ext/README.md @@ -58,15 +58,8 @@ CONFIG_SCHED_CLASS_EXT=y CONFIG_BPF_SYSCALL=y CONFIG_BPF_JIT=y CONFIG_DEBUG_INFO_BTF=y -``` - -It's also recommended that you also include the following Kconfig options: - -``` CONFIG_BPF_JIT_ALWAYS_ON=y CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_PAHOLE_HAS_SPLIT_BTF=y -CONFIG_PAHOLE_HAS_BTF_TAG=y ``` There is a `Kconfig` file in this directory whose contents you can append to diff --git a/tools/sched_ext/include/scx/bpf_arena_common.bpf.h b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h new file mode 100644 index 000000000000..2043d66940ea --- /dev/null +++ b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#pragma once + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +/* + * for older kernels try sizeof(struct genradix_node) + * or flexible: + * static inline long __bpf_page_size(void) { + * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node); + * } + * but generated code is not great. + */ +#endif + +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM) +#ifndef __arena +#define __arena __attribute__((address_space(1))) +#endif +#define __arena_global __attribute__((address_space(1))) +#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */ +#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */ +#else + +/* emit instruction: + * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as + * + * This is a workaround for LLVM compiler versions without + * __BPF_FEATURE_ADDR_SPACE_CAST that do not automatically cast between arena + * pointers and native kernel/userspace ones. In this case we explicitly do so + * with cast_kern() and cast_user(). E.g., in the Linux kernel tree, + * tools/testing/selftests/bpf includes tests that use these macros to implement + * linked lists and hashtables backed by arena memory. In sched_ext, we use + * cast_kern() and cast_user() for compatibility with older LLVM toolchains. 
+ */ +#ifndef bpf_addr_space_cast +#define bpf_addr_space_cast(var, dst_as, src_as)\ + asm volatile(".byte 0xBF; \ + .ifc %[reg], r0; \ + .byte 0x00; \ + .endif; \ + .ifc %[reg], r1; \ + .byte 0x11; \ + .endif; \ + .ifc %[reg], r2; \ + .byte 0x22; \ + .endif; \ + .ifc %[reg], r3; \ + .byte 0x33; \ + .endif; \ + .ifc %[reg], r4; \ + .byte 0x44; \ + .endif; \ + .ifc %[reg], r5; \ + .byte 0x55; \ + .endif; \ + .ifc %[reg], r6; \ + .byte 0x66; \ + .endif; \ + .ifc %[reg], r7; \ + .byte 0x77; \ + .endif; \ + .ifc %[reg], r8; \ + .byte 0x88; \ + .endif; \ + .ifc %[reg], r9; \ + .byte 0x99; \ + .endif; \ + .short %[off]; \ + .long %[as]" \ + : [reg]"+r"(var) \ + : [off]"i"(BPF_ADDR_SPACE_CAST) \ + , [as]"i"((dst_as << 16) | src_as)); +#endif + +#define __arena +#define __arena_global SEC(".addr_space.1") +#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1) +#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0) +#endif + +void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt, + int node_id, __u64 flags) __ksym __weak; +void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak; +int bpf_arena_reserve_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak; + +/* + * Note that cond_break can only be portably used in the body of a breakable + * construct, whereas can_loop can be used anywhere. 
+ */ +#ifdef SCX_BPF_UNITTEST +#define can_loop true +#define __cond_break(expr) expr +#else +#ifdef __BPF_FEATURE_MAY_GOTO +#define can_loop \ + ({ __label__ l_break, l_continue; \ + bool ret = true; \ + asm volatile goto("may_goto %l[l_break]" \ + :::: l_break); \ + goto l_continue; \ + l_break: ret = false; \ + l_continue:; \ + ret; \ + }) + +#define __cond_break(expr) \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("may_goto %l[l_break]" \ + :::: l_break); \ + goto l_continue; \ + l_break: expr; \ + l_continue:; \ + }) +#else +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +#define can_loop \ + ({ __label__ l_break, l_continue; \ + bool ret = true; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: ret = false; \ + l_continue:; \ + ret; \ + }) + +#define __cond_break(expr) \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: expr; \ + l_continue:; \ + }) +#else +#define can_loop \ + ({ __label__ l_break, l_continue; \ + bool ret = true; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: ret = false; \ + l_continue:; \ + ret; \ + }) + +#define __cond_break(expr) \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: expr; \ + l_continue:; \ + }) +#endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */ +#endif /* __BPF_FEATURE_MAY_GOTO */ +#endif /* SCX_BPF_UNITTEST */ + +#define cond_break __cond_break(break) +#define cond_break_label(label) __cond_break(goto label) + + +void bpf_preempt_disable(void) __weak __ksym; 
+void bpf_preempt_enable(void) __weak __ksym; +ssize_t bpf_arena_mapping_nr_pages(void *p__map) __weak __ksym; diff --git a/tools/sched_ext/include/scx/bpf_arena_common.h b/tools/sched_ext/include/scx/bpf_arena_common.h new file mode 100644 index 000000000000..10141db0b59d --- /dev/null +++ b/tools/sched_ext/include/scx/bpf_arena_common.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#pragma once + +#ifndef arena_container_of +#define arena_container_of(ptr, type, member) \ + ({ \ + void __arena *__mptr = (void __arena *)(ptr); \ + ((type *)(__mptr - offsetof(type, member))); \ + }) +#endif + +/* Provide the definition of PAGE_SIZE. */ +#include <sys/user.h> + +#define __arena +#define __arg_arena +#define cast_kern(ptr) /* nop for user space */ +#define cast_user(ptr) /* nop for user space */ +char __attribute__((weak)) arena[1]; + +#ifndef offsetof +#define offsetof(type, member) ((unsigned long)&((type *)0)->member) +#endif + +static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt, + int node_id, __u64 flags) +{ + return NULL; +} +static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) +{ +} diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index 7849405614b1..19459dedde41 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -7,6 +7,13 @@ #ifndef __SCX_COMMON_BPF_H #define __SCX_COMMON_BPF_H +/* + * The generated kfunc prototypes in vmlinux.h are missing address space + * attributes which cause build failures. For now, suppress the generated + * prototypes. See https://github.com/sched-ext/scx/issues/1111. 
+ */ +#define BPF_NO_KFUNC_PROTOTYPES + #ifdef LSP #define __bpf__ #include "../vmlinux.h" @@ -17,13 +24,26 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <asm-generic/errno.h> -#include "user_exit_info.h" +#include "user_exit_info.bpf.h" +#include "enum_defs.autogen.h" +#define PF_IDLE 0x00000002 /* I am an IDLE thread */ +#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ +#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ +#define PF_KSWAPD 0x00020000 /* I am kswapd */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_EXITING 0x00000004 #define CLOCK_MONOTONIC 1 +#ifndef NR_CPUS +#define NR_CPUS 1024 +#endif + +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + extern int LINUX_KERNEL_VERSION __kconfig; extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak; extern const char CONFIG_LOCALVERSION[64] __kconfig __weak; @@ -40,19 +60,15 @@ static inline void ___vmlinux_h_sanity_check___(void) s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym; -void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak; -void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak; +s32 __scx_bpf_select_cpu_and(struct task_struct *p, const struct cpumask *cpus_allowed, + struct scx_bpf_select_cpu_and_args *args) __ksym __weak; +bool __scx_bpf_dsq_insert_vtime(struct task_struct *p, struct scx_bpf_dsq_insert_vtime_args *args) __ksym __weak; u32 scx_bpf_dispatch_nr_slots(void) __ksym; void scx_bpf_dispatch_cancel(void) __ksym; -bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak; -void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak; -void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym 
__weak; -bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; -bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; -u32 scx_bpf_reenqueue_local(void) __ksym; void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; +struct task_struct *scx_bpf_dsq_peek(u64 dsq_id) __ksym __weak; int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id, u64 flags) __ksym __weak; struct task_struct *bpf_iter_scx_dsq_next(struct bpf_iter_scx_dsq *it) __ksym __weak; void bpf_iter_scx_dsq_destroy(struct bpf_iter_scx_dsq *it) __ksym __weak; @@ -62,21 +78,29 @@ void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak; u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak; void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak; +u32 scx_bpf_nr_node_ids(void) __ksym __weak; u32 scx_bpf_nr_cpu_ids(void) __ksym __weak; +int scx_bpf_cpu_node(s32 cpu) __ksym __weak; const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak; const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak; void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak; +const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak; const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym; +const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak; const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym; void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym; bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; +s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak; s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym; +s32 scx_bpf_pick_any_cpu_node(const cpumask_t 
*cpus_allowed, int node, u64 flags) __ksym __weak; s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym; bool scx_bpf_task_running(const struct task_struct *p) __ksym; s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym; -struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak; +struct rq *scx_bpf_locked_rq(void) __ksym; +struct task_struct *scx_bpf_cpu_curr(s32 cpu) __ksym __weak; u64 scx_bpf_now(void) __ksym __weak; +void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak; /* * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from @@ -84,9 +108,15 @@ u64 scx_bpf_now(void) __ksym __weak; */ #define BPF_FOR_EACH_ITER (&___it) +#define scx_read_event(e, name) \ + (bpf_core_field_exists((e)->name) ? (e)->name : 0) + static inline __attribute__((format(printf, 1, 2))) void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {} +#define SCX_STRINGIFY(x) #x +#define SCX_TOSTRING(x) SCX_STRINGIFY(x) + /* * Helper macro for initializing the fmt and variadic argument inputs to both * bstr exit kfuncs. Callers to this function should use ___fmt and ___param to @@ -121,13 +151,15 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {} * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments * instead of an array of u64. Invoking this macro will cause the scheduler to * exit in an erroneous state, with diagnostic information being passed to the - * user. + * user. It appends the file and line number to aid debugging. */ #define scx_bpf_error(fmt, args...) 
\ ({ \ - scx_bpf_bstr_preamble(fmt, args) \ + scx_bpf_bstr_preamble( \ + __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args) \ scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \ - ___scx_bpf_bstr_format_checker(fmt, ##args); \ + ___scx_bpf_bstr_format_checker( \ + __FILE__ ":" SCX_TOSTRING(__LINE__) ": " fmt, ##args); \ }) /* @@ -209,6 +241,7 @@ BPF_PROG(name, ##args) * be a pointer to the area. Use `MEMBER_VPTR(*ptr, .member)` instead of * `MEMBER_VPTR(ptr, ->member)`. */ +#ifndef MEMBER_VPTR #define MEMBER_VPTR(base, member) (typeof((base) member) *) \ ({ \ u64 __base = (u64)&(base); \ @@ -225,6 +258,7 @@ BPF_PROG(name, ##args) [max]"i"(sizeof(base) - sizeof((base) member))); \ __addr; \ }) +#endif /* MEMBER_VPTR */ /** * ARRAY_ELEM_PTR - Obtain the verified pointer to an array element @@ -240,6 +274,7 @@ BPF_PROG(name, ##args) * size of the array to compute the max, which will result in rejection by * the verifier. */ +#ifndef ARRAY_ELEM_PTR #define ARRAY_ELEM_PTR(arr, i, n) (typeof(arr[i]) *) \ ({ \ u64 __base = (u64)arr; \ @@ -254,7 +289,51 @@ BPF_PROG(name, ##args) [max]"r"(sizeof(arr[0]) * ((n) - 1))); \ __addr; \ }) +#endif /* ARRAY_ELEM_PTR */ +/** + * __sink - Hide @expr's value from the compiler and BPF verifier + * @expr: The expression whose value should be opacified + * + * No-op at runtime. The empty inline assembly with a read-write constraint + * ("+g") has two effects at compile/verify time: + * + * 1. Compiler: treats @expr as both read and written, preventing dead-code + * elimination and keeping @expr (and any side effects that produced it) + * alive. + * + * 2. BPF verifier: forgets the precise value/range of @expr ("makes it + * imprecise"). The verifier normally tracks exact ranges for every register + * and stack slot. While useful, precision means each distinct value creates a + * separate verifier state. 
Inside loops this leads to state explosion - each + * iteration carries different precise values so states never merge and the + * verifier explores every iteration individually. + * + * Example - preventing loop state explosion:: + * + * u32 nr_intersects = 0, nr_covered = 0; + * __sink(nr_intersects); + * __sink(nr_covered); + * bpf_for(i, 0, nr_nodes) { + * if (intersects(cpumask, node_mask[i])) + * nr_intersects++; + * if (covers(cpumask, node_mask[i])) + * nr_covered++; + * } + * + * Without __sink(), the verifier tracks every possible (nr_intersects, + * nr_covered) pair across iterations, causing "BPF program is too large". With + * __sink(), the values become unknown scalars so all iterations collapse into + * one reusable state. + * + * Example - keeping a reference alive:: + * + * struct task_struct *t = bpf_task_acquire(task); + * __sink(t); + * + * Follows the convention from BPF selftests (bpf_misc.h). + */ +#define __sink(expr) asm volatile ("" : "+g"(expr)) /* * BPF declarations and helpers @@ -301,6 +380,7 @@ void bpf_task_release(struct task_struct *p) __ksym; /* cgroup */ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; +struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym; void bpf_cgroup_release(struct cgroup *cgrp) __ksym; struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; @@ -418,8 +498,27 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask) */ static inline bool is_migration_disabled(const struct task_struct *p) { - if (bpf_core_field_exists(p->migration_disabled)) - return p->migration_disabled; + /* + * Testing p->migration_disabled in a BPF code is tricky because the + * migration is _always_ disabled while running the BPF code. + * The prolog (__bpf_prog_enter) and epilog (__bpf_prog_exit) for BPF + * code execution disable and re-enable the migration of the current + * task, respectively. So, the _current_ task of the sched_ext ops is + * always migration-disabled. 
Moreover, p->migration_disabled could be + * two or greater when a sched_ext ops BPF code (e.g., ops.tick) is + * executed in the middle of the other BPF code execution. + * + * Therefore, we should decide that the _current_ task is + * migration-disabled only when its migration_disabled count is greater + * than one. In other words, when p->migration_disabled == 1, there is + * an ambiguity, so we should check if @p is the current task or not. + */ + if (bpf_core_field_exists(p->migration_disabled)) { + if (p->migration_disabled == 1) + return bpf_get_current_task_btf() != p; + else + return p->migration_disabled; + } return false; } @@ -456,7 +555,7 @@ static inline s64 time_delta(u64 after, u64 before) */ static inline bool time_after(u64 a, u64 b) { - return (s64)(b - a) < 0; + return (s64)(b - a) < 0; } /** @@ -480,7 +579,7 @@ static inline bool time_before(u64 a, u64 b) */ static inline bool time_after_eq(u64 a, u64 b) { - return (s64)(a - b) >= 0; + return (s64)(a - b) >= 0; } /** @@ -527,9 +626,15 @@ static inline bool time_in_range_open(u64 a, u64 b, u64 c) */ /* useful compiler attributes */ +#ifndef likely #define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely #define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#ifndef __maybe_unused #define __maybe_unused __attribute__((__unused__)) +#endif /* * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They @@ -568,20 +673,68 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s } } -#define READ_ONCE(x) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__c = { 0 } }; \ - __read_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ +/* + * __unqual_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged, + * + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' + * is not type-compatible with 'signed char', and we define a separate case. 
+ * + * This is copied verbatim from kernel's include/linux/compiler_types.h, but + * with default expression (for pointers) changed from (x) to (typeof(x)0). + * + * This is because LLVM has a bug where for lvalue (x), it does not get rid of + * an extra address_space qualifier, but does in case of rvalue (typeof(x)0). + * Hence, for pointers, we need to create an rvalue expression to get the + * desired type. See https://github.com/llvm/llvm-project/issues/53400. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type : (unsigned type)0, signed type : (signed type)0 + +#define __unqual_typeof(x) \ + typeof(_Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (typeof(x))0)) + +#define READ_ONCE(x) \ +({ \ + union { __unqual_typeof(x) __val; char __c[1]; } __u = \ + { .__c = { 0 } }; \ + __read_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \ + __u.__val; \ }) -#define WRITE_ONCE(x, val) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__val = (val) }; \ - __write_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ +#define WRITE_ONCE(x, val) \ +({ \ + union { __unqual_typeof(x) __val; char __c[1]; } __u = \ + { .__val = (val) }; \ + __write_once_size((__unqual_typeof(x) *)&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +/* + * __calc_avg - Calculate exponential weighted moving average (EWMA) with + * @old and @new values. @decay represents how large the @old value remains. + * With a larger @decay value, the moving average changes slowly, exhibiting + * fewer fluctuations. 
+ */ +#define __calc_avg(old, new, decay) ({ \ + typeof(decay) thr = 1 << (decay); \ + typeof(old) ret; \ + if (((old) < thr) || ((new) < thr)) { \ + if (((old) == 1) && ((new) == 0)) \ + ret = 0; \ + else \ + ret = ((old) - ((old) >> 1)) + ((new) >> 1); \ + } else { \ + ret = ((old) - ((old) >> (decay))) + ((new) >> (decay)); \ + } \ + ret; \ }) /* @@ -614,6 +767,274 @@ static inline u32 log2_u64(u64 v) return log2_u32(v) + 1; } +/* + * sqrt_u64 - Calculate the square root of value @x using Newton's method. + */ +static inline u64 __sqrt_u64(u64 x) +{ + if (x == 0 || x == 1) + return x; + + u64 r = ((1ULL << 32) > x) ? x : (1ULL << 32); + + for (int i = 0; i < 8; ++i) { + u64 q = x / r; + if (r <= q) + break; + r = (r + q) >> 1; + } + return r; +} + +/* + * ctzll -- Counts trailing zeros in an unsigned long long. If the input value + * is zero, the return value is undefined. + */ +static inline int ctzll(u64 v) +{ +#if (!defined(__BPF__) && defined(__SCX_TARGET_ARCH_x86)) || \ + (defined(__BPF__) && defined(__clang_major__) && __clang_major__ >= 19) + /* + * Use the ctz builtin when: (1) building for native x86, or + * (2) building for BPF with clang >= 19 (BPF backend supports + * the intrinsic from clang 19 onward; earlier versions hit + * "unimplemented opcode" in the backend). + */ + return __builtin_ctzll(v); +#else + /* + * If neither the target architecture nor the toolchains support ctzll, + * use software-based emulation. Let's use the De Bruijn sequence-based + * approach to find LSB fastly. 
See the details of De Bruijn sequence: + * + * https://en.wikipedia.org/wiki/De_Bruijn_sequence + * https://www.chessprogramming.org/BitScan#De_Bruijn_Multiplication + */ + const int lookup_table[64] = { + 0, 1, 48, 2, 57, 49, 28, 3, 61, 58, 50, 42, 38, 29, 17, 4, + 62, 55, 59, 36, 53, 51, 43, 22, 45, 39, 33, 30, 24, 18, 12, 5, + 63, 47, 56, 27, 60, 41, 37, 16, 54, 35, 52, 21, 44, 32, 23, 11, + 46, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, + }; + const u64 DEBRUIJN_CONSTANT = 0x03f79d71b4cb0a89ULL; + unsigned int index; + u64 lowest_bit; + const int *lt; + + if (v == 0) + return -1; + + /* + * Isolate the least significant bit (LSB). + * For example, if v = 0b...10100, then v & -v = 0b...00100 + */ + lowest_bit = v & -v; + + /* + * Each isolated bit produces a unique 6-bit value, guaranteed by the + * De Bruijn property. Calculate a unique index into the lookup table + * using the magic constant and a right shift. + * + * Multiplying by the 64-bit constant "spreads out" that 1-bit into a + * unique pattern in the top 6 bits. This uniqueness property is + * exactly what a De Bruijn sequence guarantees: Every possible 6-bit + * pattern (in top bits) occurs exactly once for each LSB position. So, + * the constant 0x03f79d71b4cb0a89ULL is carefully chosen to be a + * De Bruijn sequence, ensuring no collisions in the table index. + */ + index = (lowest_bit * DEBRUIJN_CONSTANT) >> 58; + + /* + * Lookup in a precomputed table. No collision is guaranteed by the + * De Bruijn property. + */ + lt = MEMBER_VPTR(lookup_table, [index]); + return (lt)? *lt : -1; +#endif +} + +/* + * Return a value proportionally scaled to the task's weight. + */ +static inline u64 scale_by_task_weight(const struct task_struct *p, u64 value) +{ + return (value * p->scx.weight) / 100; +} + +/* + * Return a value inversely proportional to the task's weight. 
+ */ +static inline u64 scale_by_task_weight_inverse(const struct task_struct *p, u64 value) +{ + return value * 100 / p->scx.weight; +} + + +/* + * Get a random u64 from the kernel's pseudo-random generator. + */ +static inline u64 get_prandom_u64() +{ + return ((u64)bpf_get_prandom_u32() << 32) | bpf_get_prandom_u32(); +} + +/* + * Define the shadow structure to avoid a compilation error when + * vmlinux.h does not enable necessary kernel configs. The ___local + * suffix is a CO-RE convention that tells the loader to match this + * against the base struct rq in the kernel. The attribute + * preserve_access_index tells the compiler to generate a CO-RE + * relocation for these fields. + */ +struct rq___local { + /* + * A monotonically increasing clock per CPU. It is rq->clock minus + * cumulative IRQ time and hypervisor steal time. Unlike rq->clock, + * it does not advance during IRQ processing or hypervisor preemption. + * It does advance during idle (the idle task counts as a running task + * for this purpose). + */ + u64 clock_task; + /* + * Invariant version of clock_task scaled by CPU capacity and + * frequency. For example, clock_pelt advances 2x slower on a CPU + * with half the capacity. + * + * At idle exit, rq->clock_pelt jumps forward to resync with + * clock_task. The kernel's rq_clock_pelt() corrects for this jump + * by subtracting lost_idle_time, yielding a clock that appears + * continuous across idle transitions. scx_clock_pelt() mirrors + * rq_clock_pelt() by performing the same subtraction. + */ + u64 clock_pelt; + /* + * Accumulates the magnitude of each clock_pelt jump at idle exit. + * Subtracting this from clock_pelt gives rq_clock_pelt(): a + * continuous, capacity-invariant clock suitable for both task + * execution time stamping and cross-idle measurements. + */ + unsigned long lost_idle_time; + /* + * Shadow of paravirt_steal_clock() (the hypervisor's cumulative + * stolen time counter). 
Stays frozen while the hypervisor preempts + * the vCPU; catches up the next time update_rq_clock_task() is + * called. The delta is the stolen time not yet subtracted from + * clock_task. + * + * Unlike irqtime->total (a plain kernel-side field), the live stolen + * time counter lives in hypervisor-specific shared memory and has no + * kernel-side equivalent readable from BPF in a hypervisor-agnostic + * way. This field is therefore the only portable BPF-accessible + * approximation of cumulative steal time. + * + * Available only when CONFIG_PARAVIRT_TIME_ACCOUNTING is on. + */ + u64 prev_steal_time_rq; +} __attribute__((preserve_access_index)); + +extern struct rq runqueues __ksym; + +/* + * Define the shadow structure to avoid a compilation error when + * vmlinux.h does not enable necessary kernel configs. + */ +struct irqtime___local { + /* + * Cumulative IRQ time counter for this CPU, in nanoseconds. Advances + * immediately at the exit of every hardirq and non-ksoftirqd softirq + * via irqtime_account_irq(). ksoftirqd time is counted as normal + * task time and is NOT included. NMI time is also NOT included. + * + * The companion field irqtime->sync (struct u64_stats_sync) protects + * against 64-bit tearing on 32-bit architectures. On 64-bit kernels, + * u64_stats_sync is an empty struct and all seqcount operations are + * no-ops, so a plain BPF_CORE_READ of this field is safe. + * + * Available only when CONFIG_IRQ_TIME_ACCOUNTING is on. + */ + u64 total; +} __attribute__((preserve_access_index)); + +/* + * cpu_irqtime is a per-CPU variable defined only when + * CONFIG_IRQ_TIME_ACCOUNTING is on. Declare it as __weak so the BPF + * loader sets its address to 0 (rather than failing) when the symbol + * is absent from the running kernel. 
+ */ +extern struct irqtime___local cpu_irqtime __ksym __weak; + +static inline struct rq___local *get_current_rq(u32 cpu) +{ + /* + * This is a workaround to get an rq pointer since we decided to + * deprecate scx_bpf_cpu_rq(). + * + * WARNING: The caller must hold the rq lock for @cpu. This is + * guaranteed when called from scheduling callbacks (ops.running, + * ops.stopping, ops.enqueue, ops.dequeue, ops.dispatch, etc.). + * There is no runtime check available in BPF for kernel spinlock + * state — correctness is enforced by calling context only. + */ + return (void *)bpf_per_cpu_ptr(&runqueues, cpu); +} + +static inline u64 scx_clock_task(u32 cpu) +{ + struct rq___local *rq = get_current_rq(cpu); + + /* Equivalent to the kernel's rq_clock_task(). */ + return rq ? rq->clock_task : 0; +} + +static inline u64 scx_clock_pelt(u32 cpu) +{ + struct rq___local *rq = get_current_rq(cpu); + + /* + * Equivalent to the kernel's rq_clock_pelt(): subtracts + * lost_idle_time from clock_pelt to absorb the jump that occurs + * when clock_pelt resyncs with clock_task at idle exit. The result + * is a continuous, capacity-invariant clock safe for both task + * execution time stamping and cross-idle measurements. + */ + return rq ? (rq->clock_pelt - rq->lost_idle_time) : 0; +} + +static inline u64 scx_clock_virt(u32 cpu) +{ + struct rq___local *rq; + + /* + * Check field existence before calling get_current_rq() so we avoid + * the per_cpu lookup entirely on kernels built without + * CONFIG_PARAVIRT_TIME_ACCOUNTING. + */ + if (!bpf_core_field_exists(((struct rq___local *)0)->prev_steal_time_rq)) + return 0; + + /* Lagging shadow of the kernel's paravirt_steal_clock(). */ + rq = get_current_rq(cpu); + return rq ? 
BPF_CORE_READ(rq, prev_steal_time_rq) : 0; +} + +static inline u64 scx_clock_irq(u32 cpu) +{ + struct irqtime___local *irqt; + + /* + * bpf_core_type_exists() resolves at load time: if struct irqtime is + * absent from kernel BTF (CONFIG_IRQ_TIME_ACCOUNTING off), the loader + * patches this into an unconditional return 0, making the + * bpf_per_cpu_ptr() call below dead code that the verifier never sees. + */ + if (!bpf_core_type_exists(struct irqtime___local)) + return 0; + + /* Equivalent to the kernel's irq_time_read(). */ + irqt = bpf_per_cpu_ptr(&cpu_irqtime, cpu); + return irqt ? BPF_CORE_READ(irqt, total) : 0; +} + #include "compat.bpf.h" #include "enums.bpf.h" diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h index dc18b99e55cd..60f5513787d6 100644 --- a/tools/sched_ext/include/scx/common.h +++ b/tools/sched_ext/include/scx/common.h @@ -16,6 +16,7 @@ #include <stdlib.h> #include <stdint.h> #include <errno.h> +#include "enum_defs.autogen.h" typedef uint8_t u8; typedef uint16_t u16; @@ -66,6 +67,7 @@ typedef int64_t s64; bpf_map__set_value_size((__skel)->maps.elfsec##_##arr, \ sizeof((__skel)->elfsec##_##arr->arr[0]) * (n)); \ (__skel)->elfsec##_##arr = \ + (typeof((__skel)->elfsec##_##arr)) \ bpf_map__initial_value((__skel)->maps.elfsec##_##arr, &__sz); \ } while (0) @@ -73,9 +75,6 @@ typedef int64_t s64; #include "compat.h" #include "enums.h" -/* not available when building kernel tools/sched_ext */ -#if __has_include(<lib/sdt_task.h>) -#include <lib/sdt_task.h> -#endif +#include "bpf_arena_common.h" #endif /* __SCHED_EXT_COMMON_H */ diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h index 50e1499ae093..8977b5a2caa1 100644 --- a/tools/sched_ext/include/scx/compat.bpf.h +++ b/tools/sched_ext/include/scx/compat.bpf.h @@ -16,114 +16,162 @@ }) /* v6.12: 819513666966 ("sched_ext: Add cgroup support") */ -#define __COMPAT_scx_bpf_task_cgroup(p) \ - 
(bpf_ksym_exists(scx_bpf_task_cgroup) ? \ - scx_bpf_task_cgroup((p)) : NULL) +struct cgroup *scx_bpf_task_cgroup___new(struct task_struct *p) __ksym __weak; + +#define scx_bpf_task_cgroup(p) \ + (bpf_ksym_exists(scx_bpf_task_cgroup___new) ? \ + scx_bpf_task_cgroup___new((p)) : NULL) /* * v6.13: The verb `dispatch` was too overloaded and confusing. kfuncs are * renamed to unload the verb. * - * Build error is triggered if old names are used. New binaries work with both - * new and old names. The compat macros will be removed on v6.15 release. - * * scx_bpf_dispatch_from_dsq() and friends were added during v6.12 by * 4c30f5ce4f7a ("sched_ext: Implement scx_bpf_dispatch[_vtime]_from_dsq()"). - * Preserve __COMPAT macros until v6.15. + * + * v7.1: scx_bpf_dsq_move_to_local___v2() to add @enq_flags. */ -void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak; -void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak; -bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak; -void scx_bpf_dispatch_from_dsq_set_slice___compat(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak; -void scx_bpf_dispatch_from_dsq_set_vtime___compat(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak; -bool scx_bpf_dispatch_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; -bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; - -#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \ - (bpf_ksym_exists(scx_bpf_dsq_insert) ? \ - scx_bpf_dsq_insert((p), (dsq_id), (slice), (enq_flags)) : \ - scx_bpf_dispatch___compat((p), (dsq_id), (slice), (enq_flags))) - -#define scx_bpf_dsq_insert_vtime(p, dsq_id, slice, vtime, enq_flags) \ - (bpf_ksym_exists(scx_bpf_dsq_insert_vtime) ? 
\ - scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \ - scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags))) - -#define scx_bpf_dsq_move_to_local(dsq_id) \ - (bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \ - scx_bpf_dsq_move_to_local((dsq_id)) : \ - scx_bpf_consume___compat((dsq_id))) - -#define __COMPAT_scx_bpf_dsq_move_set_slice(it__iter, slice) \ - (bpf_ksym_exists(scx_bpf_dsq_move_set_slice) ? \ - scx_bpf_dsq_move_set_slice((it__iter), (slice)) : \ - (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___compat) ? \ - scx_bpf_dispatch_from_dsq_set_slice___compat((it__iter), (slice)) : \ +bool scx_bpf_dsq_move_to_local___v2(u64 dsq_id, u64 enq_flags) __ksym __weak; +bool scx_bpf_dsq_move_to_local___v1(u64 dsq_id) __ksym __weak; +void scx_bpf_dsq_move_set_slice___new(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak; +void scx_bpf_dsq_move_set_vtime___new(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak; +bool scx_bpf_dsq_move___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; +bool scx_bpf_dsq_move_vtime___new(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; + +bool scx_bpf_consume___old(u64 dsq_id) __ksym __weak; +void scx_bpf_dispatch_from_dsq_set_slice___old(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak; +void scx_bpf_dispatch_from_dsq_set_vtime___old(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak; +bool scx_bpf_dispatch_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; +bool scx_bpf_dispatch_vtime_from_dsq___old(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak; + +#define scx_bpf_dsq_move_to_local(dsq_id, enq_flags) \ + (bpf_ksym_exists(scx_bpf_dsq_move_to_local___v2) ? 
\ + scx_bpf_dsq_move_to_local___v2((dsq_id), (enq_flags)) : \ + (bpf_ksym_exists(scx_bpf_dsq_move_to_local___v1) ? \ + scx_bpf_dsq_move_to_local___v1((dsq_id)) : \ + scx_bpf_consume___old((dsq_id)))) + +#define scx_bpf_dsq_move_set_slice(it__iter, slice) \ + (bpf_ksym_exists(scx_bpf_dsq_move_set_slice___new) ? \ + scx_bpf_dsq_move_set_slice___new((it__iter), (slice)) : \ + (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_slice___old) ? \ + scx_bpf_dispatch_from_dsq_set_slice___old((it__iter), (slice)) : \ (void)0)) -#define __COMPAT_scx_bpf_dsq_move_set_vtime(it__iter, vtime) \ - (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime) ? \ - scx_bpf_dsq_move_set_vtime((it__iter), (vtime)) : \ - (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___compat) ? \ - scx_bpf_dispatch_from_dsq_set_vtime___compat((it__iter), (vtime)) : \ - (void) 0)) - -#define __COMPAT_scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \ - (bpf_ksym_exists(scx_bpf_dsq_move) ? \ - scx_bpf_dsq_move((it__iter), (p), (dsq_id), (enq_flags)) : \ - (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___compat) ? \ - scx_bpf_dispatch_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \ +#define scx_bpf_dsq_move_set_vtime(it__iter, vtime) \ + (bpf_ksym_exists(scx_bpf_dsq_move_set_vtime___new) ? \ + scx_bpf_dsq_move_set_vtime___new((it__iter), (vtime)) : \ + (bpf_ksym_exists(scx_bpf_dispatch_from_dsq_set_vtime___old) ? \ + scx_bpf_dispatch_from_dsq_set_vtime___old((it__iter), (vtime)) : \ + (void)0)) + +#define scx_bpf_dsq_move(it__iter, p, dsq_id, enq_flags) \ + (bpf_ksym_exists(scx_bpf_dsq_move___new) ? \ + scx_bpf_dsq_move___new((it__iter), (p), (dsq_id), (enq_flags)) : \ + (bpf_ksym_exists(scx_bpf_dispatch_from_dsq___old) ? \ + scx_bpf_dispatch_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \ false)) -#define __COMPAT_scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \ - (bpf_ksym_exists(scx_bpf_dsq_move_vtime) ? 
\ - scx_bpf_dsq_move_vtime((it__iter), (p), (dsq_id), (enq_flags)) : \ - (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___compat) ? \ - scx_bpf_dispatch_vtime_from_dsq___compat((it__iter), (p), (dsq_id), (enq_flags)) : \ +#define scx_bpf_dsq_move_vtime(it__iter, p, dsq_id, enq_flags) \ + (bpf_ksym_exists(scx_bpf_dsq_move_vtime___new) ? \ + scx_bpf_dsq_move_vtime___new((it__iter), (p), (dsq_id), (enq_flags)) : \ + (bpf_ksym_exists(scx_bpf_dispatch_vtime_from_dsq___old) ? \ + scx_bpf_dispatch_vtime_from_dsq___old((it__iter), (p), (dsq_id), (enq_flags)) : \ false)) -#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \ - _Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()") +/* + * v6.15: 950ad93df2fc ("bpf: add kfunc for populating cpumask bits") + * + * Compat macro will be dropped on v6.19 release. + */ +int bpf_cpumask_populate(struct cpumask *dst, void *src, size_t src__sz) __ksym __weak; -#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \ - _Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()") +#define __COMPAT_bpf_cpumask_populate(cpumask, src, size__sz) \ + (bpf_ksym_exists(bpf_cpumask_populate) ? \ + (bpf_cpumask_populate(cpumask, src, size__sz)) : -EOPNOTSUPP) -#define scx_bpf_consume(dsq_id) ({ \ - _Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \ - false; \ -}) +/* + * v6.19: Introduce lockless peek API for user DSQs. + * + * Preserve the following macro until v6.21. 
+ */ +static inline struct task_struct *__COMPAT_scx_bpf_dsq_peek(u64 dsq_id) +{ + struct task_struct *p = NULL; + struct bpf_iter_scx_dsq it; -#define scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \ - _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_slice() renamed to scx_bpf_dsq_move_set_slice()") + if (bpf_ksym_exists(scx_bpf_dsq_peek)) + return scx_bpf_dsq_peek(dsq_id); + if (!bpf_iter_scx_dsq_new(&it, dsq_id, 0)) + p = bpf_iter_scx_dsq_next(&it); + bpf_iter_scx_dsq_destroy(&it); + return p; +} -#define scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \ - _Static_assert(false, "scx_bpf_dispatch_from_dsq_set_vtime() renamed to scx_bpf_dsq_move_set_vtime()") +/* + * v7.1: scx_bpf_sub_dispatch() for sub-sched dispatch. Preserve until + * we drop the compat layer for older kernels that lack the kfunc. + */ +bool scx_bpf_sub_dispatch___compat(u64 cgroup_id) __ksym __weak; -#define scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \ - _Static_assert(false, "scx_bpf_dispatch_from_dsq() renamed to scx_bpf_dsq_move()"); \ - false; \ -}) +static inline bool scx_bpf_sub_dispatch(u64 cgroup_id) +{ + if (bpf_ksym_exists(scx_bpf_sub_dispatch___compat)) + return scx_bpf_sub_dispatch___compat(cgroup_id); + return false; +} -#define scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \ - _Static_assert(false, "scx_bpf_dispatch_vtime_from_dsq() renamed to scx_bpf_dsq_move_vtime()"); \ - false; \ -}) +/** + * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on + * in a compatible way. We will preserve this __COMPAT helper until v6.16. + * + * @enq_flags: enqueue flags from ops.enqueue() + * + * Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags + */ +static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags) +{ +#ifdef HAVE_SCX_ENQ_CPU_SELECTED + /* + * This is the case that a BPF code compiled against vmlinux.h + * where the enum SCX_ENQ_CPU_SELECTED exists. 
+ */ -#define __COMPAT_scx_bpf_dispatch_from_dsq_set_slice(it__iter, slice) \ - _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_slice() renamed to __COMPAT_scx_bpf_dsq_move_set_slice()") + /* + * We should temporarily suspend the macro expansion of + * 'SCX_ENQ_CPU_SELECTED'. This avoids 'SCX_ENQ_CPU_SELECTED' being + * rewritten to '__SCX_ENQ_CPU_SELECTED' when 'SCX_ENQ_CPU_SELECTED' + * is defined in 'scripts/gen_enums.py'. + */ +#pragma push_macro("SCX_ENQ_CPU_SELECTED") +#undef SCX_ENQ_CPU_SELECTED + u64 flag; -#define __COMPAT_scx_bpf_dispatch_from_dsq_set_vtime(it__iter, vtime) \ - _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq_set_vtime() renamed to __COMPAT_scx_bpf_dsq_move_set_vtime()") + /* + * When the kernel did not have SCX_ENQ_CPU_SELECTED, + * select_task_rq_scx() has never been skipped. Thus, this case + * should be considered that the CPU has already been selected. + */ + if (!bpf_core_enum_value_exists(enum scx_enq_flags, + SCX_ENQ_CPU_SELECTED)) + return true; -#define __COMPAT_scx_bpf_dispatch_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \ - _Static_assert(false, "__COMPAT_scx_bpf_dispatch_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move()"); \ - false; \ -}) + flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED); + return enq_flags & flag; + + /* + * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'. + */ +#pragma pop_macro("SCX_ENQ_CPU_SELECTED") +#else + /* + * This is the case that a BPF code compiled against vmlinux.h + * where the enum SCX_ENQ_CPU_SELECTED does NOT exist. + */ + return true; +#endif /* HAVE_SCX_ENQ_CPU_SELECTED */ +} -#define __COMPAT_scx_bpf_dispatch_vtime_from_dsq(it__iter, p, dsq_id, enq_flags) ({ \ - _Static_assert(false, "__COMPAT_scx_bpf_dispatch_vtime_from_dsq() renamed to __COMPAT_scx_bpf_dsq_move_vtime()"); \ - false; \ -}) #define scx_bpf_now() \ (bpf_ksym_exists(scx_bpf_now) ? 
\ @@ -131,6 +179,250 @@ bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, bpf_ktime_get_ns()) /* + * v6.15: Introduce event counters. + * + * Preserve the following macro until v6.17. + */ +#define __COMPAT_scx_bpf_events(events, size) \ + (bpf_ksym_exists(scx_bpf_events) ? \ + scx_bpf_events(events, size) : ({})) + +/* + * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle + * cpumasks. + * + * Preserve the following __COMPAT_scx_*_node macros until v6.17. + */ +#define __COMPAT_scx_bpf_nr_node_ids() \ + (bpf_ksym_exists(scx_bpf_nr_node_ids) ? \ + scx_bpf_nr_node_ids() : 1U) + +#define __COMPAT_scx_bpf_cpu_node(cpu) \ + (bpf_ksym_exists(scx_bpf_cpu_node) ? \ + scx_bpf_cpu_node(cpu) : 0) + +#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \ + (bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ? \ + scx_bpf_get_idle_cpumask_node(node) : \ + scx_bpf_get_idle_cpumask()) \ + +#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \ + (bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ? \ + scx_bpf_get_idle_smtmask_node(node) : \ + scx_bpf_get_idle_smtmask()) + +#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \ + (bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ? \ + scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \ + scx_bpf_pick_idle_cpu(cpus_allowed, flags)) + +#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \ + (bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ? \ + scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \ + scx_bpf_pick_any_cpu(cpus_allowed, flags)) + +/* + * v6.18: Add a helper to retrieve the current task running on a CPU. + * + * Keep this helper available until v6.20 for compatibility. + */ +static inline struct task_struct *__COMPAT_scx_bpf_cpu_curr(int cpu) +{ + struct rq *rq; + + if (bpf_ksym_exists(scx_bpf_cpu_curr)) + return scx_bpf_cpu_curr(cpu); + + rq = scx_bpf_cpu_rq(cpu); + + return rq ? 
rq->curr : NULL; +} + +/* + * v6.19: To work around BPF maximum parameter limit, the following kfuncs are + * replaced with variants that pack scalar arguments in a struct. Wrappers are + * provided to maintain source compatibility. + * + * v6.13: scx_bpf_dsq_insert_vtime() renaming is also handled here. See the + * block on dispatch renaming above for more details. + * + * The kernel will carry the compat variants until v6.23 to maintain binary + * compatibility. After v6.23 release, remove the compat handling and move the + * wrappers to common.bpf.h. + */ +s32 scx_bpf_select_cpu_and___compat(struct task_struct *p, s32 prev_cpu, u64 wake_flags, + const struct cpumask *cpus_allowed, u64 flags) __ksym __weak; +void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak; +void scx_bpf_dsq_insert_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak; + +/** + * scx_bpf_select_cpu_and - Pick an idle CPU usable by task @p + * @p: task_struct to select a CPU for + * @prev_cpu: CPU @p was on previously + * @wake_flags: %SCX_WAKE_* flags + * @cpus_allowed: cpumask of allowed CPUs + * @flags: %SCX_PICK_IDLE* flags + * + * Inline wrapper that packs scalar arguments into a struct and calls + * __scx_bpf_select_cpu_and(). See __scx_bpf_select_cpu_and() for details. + */ +static inline s32 +scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags, + const struct cpumask *cpus_allowed, u64 flags) +{ + if (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args)) { + struct scx_bpf_select_cpu_and_args args = { + .prev_cpu = prev_cpu, + .wake_flags = wake_flags, + .flags = flags, + }; + + return __scx_bpf_select_cpu_and(p, cpus_allowed, &args); + } else { + return scx_bpf_select_cpu_and___compat(p, prev_cpu, wake_flags, + cpus_allowed, flags); + } +} + +/* + * scx_bpf_select_cpu_and() is now an inline wrapper. 
Use this instead of + * bpf_ksym_exists(scx_bpf_select_cpu_and) to test availability. + */ +#define __COMPAT_HAS_scx_bpf_select_cpu_and \ + (bpf_core_type_exists(struct scx_bpf_select_cpu_and_args) || \ + bpf_ksym_exists(scx_bpf_select_cpu_and___compat)) + +/** + * scx_bpf_dsq_insert_vtime - Insert a task into the vtime priority queue of a DSQ + * @p: task_struct to insert + * @dsq_id: DSQ to insert into + * @slice: duration @p can run for in nsecs, 0 to keep the current value + * @vtime: @p's ordering inside the vtime-sorted queue of the target DSQ + * @enq_flags: SCX_ENQ_* + * + * Inline wrapper that packs scalar arguments into a struct and calls + * __scx_bpf_dsq_insert_vtime(). See __scx_bpf_dsq_insert_vtime() for details. + */ +static inline bool +scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, + u64 enq_flags) +{ + if (bpf_core_type_exists(struct scx_bpf_dsq_insert_vtime_args)) { + struct scx_bpf_dsq_insert_vtime_args args = { + .dsq_id = dsq_id, + .slice = slice, + .vtime = vtime, + .enq_flags = enq_flags, + }; + + return __scx_bpf_dsq_insert_vtime(p, &args); + } else if (bpf_ksym_exists(scx_bpf_dsq_insert_vtime___compat)) { + scx_bpf_dsq_insert_vtime___compat(p, dsq_id, slice, vtime, + enq_flags); + return true; + } else { + scx_bpf_dispatch_vtime___compat(p, dsq_id, slice, vtime, + enq_flags); + return true; + } +} + +/* + * v6.19: scx_bpf_dsq_insert() now returns bool instead of void. Move + * scx_bpf_dsq_insert() decl to common.bpf.h and drop compat helper after v6.22. + * The extra ___compat suffix is to work around libbpf not ignoring __SUFFIX on + * kernel side. The entire suffix can be dropped later. + * + * v6.13: scx_bpf_dsq_insert() renaming is also handled here. See the block on + * dispatch renaming above for more details. 
 + */
+bool scx_bpf_dsq_insert___v2___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dsq_insert___v1(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
+
+static inline bool
+scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags)
+{
+	if (bpf_ksym_exists(scx_bpf_dsq_insert___v2___compat)) {
+		return scx_bpf_dsq_insert___v2___compat(p, dsq_id, slice, enq_flags);
+	} else if (bpf_ksym_exists(scx_bpf_dsq_insert___v1)) {
+		scx_bpf_dsq_insert___v1(p, dsq_id, slice, enq_flags);
+		return true;
+	} else {
+		scx_bpf_dispatch___compat(p, dsq_id, slice, enq_flags);
+		return true;
+	}
+}
+
+/*
+ * v6.19: scx_bpf_task_set_slice() and scx_bpf_task_set_dsq_vtime() added for
+ * sub-sched authority checks. Drop the wrappers and move the decls to
+ * common.bpf.h after v6.22.
+ */
+bool scx_bpf_task_set_slice___new(struct task_struct *p, u64 slice) __ksym __weak;
+bool scx_bpf_task_set_dsq_vtime___new(struct task_struct *p, u64 vtime) __ksym __weak;
+
+static inline void scx_bpf_task_set_slice(struct task_struct *p, u64 slice)
+{
+	if (bpf_ksym_exists(scx_bpf_task_set_slice___new))
+		scx_bpf_task_set_slice___new(p, slice);
+	else
+		p->scx.slice = slice;
+}
+
+static inline void scx_bpf_task_set_dsq_vtime(struct task_struct *p, u64 vtime)
+{
+	if (bpf_ksym_exists(scx_bpf_task_set_dsq_vtime___new))
+		scx_bpf_task_set_dsq_vtime___new(p, vtime);
+	else
+		p->scx.dsq_vtime = vtime;
+}
+
+/*
+ * v6.19: The new void variant can be called from anywhere while the older v1
+ * variant can only be called from ops.cpu_release(). The double ___ suffixes on
+ * the v2 variant need to be removed once libbpf is updated to ignore ___ suffix
+ * on kernel side. Drop the wrapper and move the decl to common.bpf.h after
+ * v6.22.
+ */ +u32 scx_bpf_reenqueue_local___v1(void) __ksym __weak; +void scx_bpf_reenqueue_local___v2___compat(void) __ksym __weak; + +static inline bool __COMPAT_scx_bpf_reenqueue_local_from_anywhere(void) +{ + return bpf_ksym_exists(scx_bpf_reenqueue_local___v2___compat); +} + +static inline void scx_bpf_reenqueue_local(void) +{ + if (__COMPAT_scx_bpf_reenqueue_local_from_anywhere()) + scx_bpf_reenqueue_local___v2___compat(); + else + scx_bpf_reenqueue_local___v1(); +} + +/* + * v6.20: New scx_bpf_dsq_reenq() that allows re-enqueues on more DSQs. This + * will eventually deprecate scx_bpf_reenqueue_local(). + */ +void scx_bpf_dsq_reenq___compat(u64 dsq_id, u64 reenq_flags, const struct bpf_prog_aux *aux__prog) __ksym __weak; + +static inline bool __COMPAT_has_generic_reenq(void) +{ + return bpf_ksym_exists(scx_bpf_dsq_reenq___compat); +} + +static inline void scx_bpf_dsq_reenq(u64 dsq_id, u64 reenq_flags) +{ + if (bpf_ksym_exists(scx_bpf_dsq_reenq___compat)) + scx_bpf_dsq_reenq___compat(dsq_id, reenq_flags, NULL); + else if (dsq_id == SCX_DSQ_LOCAL && reenq_flags == 0) + scx_bpf_reenqueue_local(); + else + scx_bpf_error("kernel too old to reenqueue foreign local or user DSQs"); +} + +/* * Define sched_ext_ops. This may be expanded to define multiple variants for * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH(). 
*/ diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h index b50280e2ba2b..039854c490d5 100644 --- a/tools/sched_ext/include/scx/compat.h +++ b/tools/sched_ext/include/scx/compat.h @@ -8,6 +8,7 @@ #define __SCX_COMPAT_H #include <bpf/btf.h> +#include <bpf/libbpf.h> #include <fcntl.h> #include <stdlib.h> #include <unistd.h> @@ -106,13 +107,27 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field return false; } -#define SCX_OPS_SWITCH_PARTIAL \ - __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL") +#define SCX_OPS_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_ops_flags", #name) + +#define SCX_OPS_KEEP_BUILTIN_IDLE SCX_OPS_FLAG(SCX_OPS_KEEP_BUILTIN_IDLE) +#define SCX_OPS_ENQ_LAST SCX_OPS_FLAG(SCX_OPS_ENQ_LAST) +#define SCX_OPS_ENQ_EXITING SCX_OPS_FLAG(SCX_OPS_ENQ_EXITING) +#define SCX_OPS_SWITCH_PARTIAL SCX_OPS_FLAG(SCX_OPS_SWITCH_PARTIAL) +#define SCX_OPS_ENQ_MIGRATION_DISABLED SCX_OPS_FLAG(SCX_OPS_ENQ_MIGRATION_DISABLED) +#define SCX_OPS_ALLOW_QUEUED_WAKEUP SCX_OPS_FLAG(SCX_OPS_ALLOW_QUEUED_WAKEUP) +#define SCX_OPS_BUILTIN_IDLE_PER_NODE SCX_OPS_FLAG(SCX_OPS_BUILTIN_IDLE_PER_NODE) +#define SCX_OPS_ALWAYS_ENQ_IMMED SCX_OPS_FLAG(SCX_OPS_ALWAYS_ENQ_IMMED) + +#define SCX_PICK_IDLE_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_pick_idle_cpu_flags", #name) + +#define SCX_PICK_IDLE_CORE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_CORE) +#define SCX_PICK_IDLE_IN_NODE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_IN_NODE) static inline long scx_hotplug_seq(void) { int fd; char buf[32]; + char *endptr; ssize_t len; long val; @@ -125,8 +140,10 @@ static inline long scx_hotplug_seq(void) buf[len] = 0; close(fd); - val = strtoul(buf, NULL, 10); - SCX_BUG_ON(val < 0, "invalid num hotplug events: %lu", val); + errno = 0; + val = strtoul(buf, &endptr, 10); + SCX_BUG_ON(errno == ERANGE || endptr == buf || + (*endptr != '\n' && *endptr != '\0'), "invalid num hotplug events: %ld", val); return val; } @@ -139,6 +156,11 @@ static inline long 
scx_hotplug_seq(void) * * ec7e3b0463e1 ("implement-ops") in https://github.com/sched-ext/sched_ext is * the current minimum required kernel version. + * + * COMPAT: + * - v6.17: ops.cgroup_set_bandwidth() + * - v6.19: ops.cgroup_set_idle() + * - v7.1: ops.sub_attach(), ops.sub_detach(), ops.sub_cgroup_id */ #define SCX_OPS_OPEN(__ops_name, __scx_name) ({ \ struct __scx_name *__skel; \ @@ -150,18 +172,75 @@ static inline long scx_hotplug_seq(void) SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \ __skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \ SCX_ENUM_INIT(__skel); \ + if (__skel->struct_ops.__ops_name->cgroup_set_bandwidth && \ + !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_bandwidth")) { \ + fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_bandwidth()\n"); \ + __skel->struct_ops.__ops_name->cgroup_set_bandwidth = NULL; \ + } \ + if (__skel->struct_ops.__ops_name->cgroup_set_idle && \ + !__COMPAT_struct_has_field("sched_ext_ops", "cgroup_set_idle")) { \ + fprintf(stderr, "WARNING: kernel doesn't support ops.cgroup_set_idle()\n"); \ + __skel->struct_ops.__ops_name->cgroup_set_idle = NULL; \ + } \ + if (__skel->struct_ops.__ops_name->sub_attach && \ + !__COMPAT_struct_has_field("sched_ext_ops", "sub_attach")) { \ + fprintf(stderr, "WARNING: kernel doesn't support ops.sub_attach()\n"); \ + __skel->struct_ops.__ops_name->sub_attach = NULL; \ + } \ + if (__skel->struct_ops.__ops_name->sub_detach && \ + !__COMPAT_struct_has_field("sched_ext_ops", "sub_detach")) { \ + fprintf(stderr, "WARNING: kernel doesn't support ops.sub_detach()\n"); \ + __skel->struct_ops.__ops_name->sub_detach = NULL; \ + } \ + if (__skel->struct_ops.__ops_name->sub_cgroup_id > 0 && \ + !__COMPAT_struct_has_field("sched_ext_ops", "sub_cgroup_id")) { \ + fprintf(stderr, "WARNING: kernel doesn't support ops.sub_cgroup_id\n"); \ + __skel->struct_ops.__ops_name->sub_cgroup_id = 0; \ + } \ __skel; \ }) +/* + * Associate non-struct_ops BPF programs with the 
scheduler's struct_ops map so + * that scx_prog_sched() can determine which scheduler a BPF program belongs + * to. Requires libbpf >= 1.7. + */ +#if LIBBPF_MAJOR_VERSION > 1 || \ + (LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION >= 7) +static inline void __scx_ops_assoc_prog(struct bpf_program *prog, + struct bpf_map *map, + const char *ops_name) +{ + s32 err = bpf_program__assoc_struct_ops(prog, map, NULL); + if (err) + fprintf(stderr, + "ERROR: Failed to associate %s with %s: %d\n", + bpf_program__name(prog), ops_name, err); +} +#else +static inline void __scx_ops_assoc_prog(struct bpf_program *prog, + struct bpf_map *map, + const char *ops_name) +{ +} +#endif + #define SCX_OPS_LOAD(__skel, __ops_name, __scx_name, __uei_name) ({ \ + struct bpf_program *__prog; \ UEI_SET_SIZE(__skel, __ops_name, __uei_name); \ SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel"); \ + bpf_object__for_each_program(__prog, (__skel)->obj) { \ + if (bpf_program__type(__prog) == BPF_PROG_TYPE_STRUCT_OPS) \ + continue; \ + __scx_ops_assoc_prog(__prog, (__skel)->maps.__ops_name, \ + #__ops_name); \ + } \ }) /* * New versions of bpftool now emit additional link placeholders for BPF maps, * and set up BPF skeleton in such a way that libbpf will auto-attach BPF maps - * automatically, assumming libbpf is recent enough (v1.5+). Old libbpf will do + * automatically, assuming libbpf is recent enough (v1.5+). Old libbpf will do * nothing with those links and won't attempt to auto-attach maps. * * To maintain compatibility with older libbpf while avoiding trying to attach diff --git a/tools/sched_ext/include/scx/enum_defs.autogen.h b/tools/sched_ext/include/scx/enum_defs.autogen.h new file mode 100644 index 000000000000..da4b459820fd --- /dev/null +++ b/tools/sched_ext/include/scx/enum_defs.autogen.h @@ -0,0 +1,158 @@ +/* + * WARNING: This file is autogenerated from gen_enum_defs.py [1]. 
+ * + * [1] https://github.com/sched-ext/scx/blob/main/scripts/gen_enum_defs.py + */ + +#ifndef __ENUM_DEFS_AUTOGEN_H__ +#define __ENUM_DEFS_AUTOGEN_H__ + +#define HAVE_SCX_DSP_DFL_MAX_BATCH +#define HAVE_SCX_DSP_MAX_LOOPS +#define HAVE_SCX_WATCHDOG_MAX_TIMEOUT +#define HAVE_SCX_EXIT_BT_LEN +#define HAVE_SCX_EXIT_MSG_LEN +#define HAVE_SCX_EXIT_DUMP_DFL_LEN +#define HAVE_SCX_CPUPERF_ONE +#define HAVE_SCX_TASK_ITER_BATCH +#define HAVE_SCX_BYPASS_HOST_NTH +#define HAVE_SCX_BYPASS_LB_DFL_INTV_US +#define HAVE_SCX_BYPASS_LB_DONOR_PCT +#define HAVE_SCX_BYPASS_LB_MIN_DELTA_DIV +#define HAVE_SCX_BYPASS_LB_BATCH +#define HAVE_SCX_REENQ_LOCAL_MAX_REPEAT +#define HAVE_SCX_SUB_MAX_DEPTH +#define HAVE_SCX_CPU_PREEMPT_RT +#define HAVE_SCX_CPU_PREEMPT_DL +#define HAVE_SCX_CPU_PREEMPT_STOP +#define HAVE_SCX_CPU_PREEMPT_UNKNOWN +#define HAVE_SCX_DEQ_SLEEP +#define HAVE_SCX_DEQ_CORE_SCHED_EXEC +#define HAVE_SCX_DEQ_SCHED_CHANGE +#define HAVE_SCX_DSQ_FLAG_BUILTIN +#define HAVE_SCX_DSQ_FLAG_LOCAL_ON +#define HAVE_SCX_DSQ_INVALID +#define HAVE_SCX_DSQ_GLOBAL +#define HAVE_SCX_DSQ_LOCAL +#define HAVE_SCX_DSQ_BYPASS +#define HAVE_SCX_DSQ_LOCAL_ON +#define HAVE_SCX_DSQ_LOCAL_CPU_MASK +#define HAVE_SCX_DSQ_ITER_REV +#define HAVE___SCX_DSQ_ITER_HAS_SLICE +#define HAVE___SCX_DSQ_ITER_HAS_VTIME +#define HAVE___SCX_DSQ_ITER_USER_FLAGS +#define HAVE___SCX_DSQ_ITER_ALL_FLAGS +#define HAVE_SCX_DSQ_LNODE_ITER_CURSOR +#define HAVE___SCX_DSQ_LNODE_PRIV_SHIFT +#define HAVE_SCX_ENABLING +#define HAVE_SCX_ENABLED +#define HAVE_SCX_DISABLING +#define HAVE_SCX_DISABLED +#define HAVE_SCX_ENQ_WAKEUP +#define HAVE_SCX_ENQ_HEAD +#define HAVE_SCX_ENQ_CPU_SELECTED +#define HAVE_SCX_ENQ_PREEMPT +#define HAVE_SCX_ENQ_IMMED +#define HAVE_SCX_ENQ_REENQ +#define HAVE_SCX_ENQ_LAST +#define HAVE___SCX_ENQ_INTERNAL_MASK +#define HAVE_SCX_ENQ_CLEAR_OPSS +#define HAVE_SCX_ENQ_DSQ_PRIQ +#define HAVE_SCX_ENQ_NESTED +#define HAVE_SCX_ENQ_GDSQ_FALLBACK +#define HAVE_SCX_TASK_DSQ_ON_PRIQ +#define HAVE_SCX_TASK_QUEUED 
+#define HAVE_SCX_TASK_IN_CUSTODY +#define HAVE_SCX_TASK_RESET_RUNNABLE_AT +#define HAVE_SCX_TASK_DEQD_FOR_SLEEP +#define HAVE_SCX_TASK_SUB_INIT +#define HAVE_SCX_TASK_IMMED +#define HAVE_SCX_TASK_STATE_SHIFT +#define HAVE_SCX_TASK_STATE_BITS +#define HAVE_SCX_TASK_STATE_MASK +#define HAVE_SCX_TASK_NONE +#define HAVE_SCX_TASK_INIT +#define HAVE_SCX_TASK_READY +#define HAVE_SCX_TASK_ENABLED +#define HAVE_SCX_TASK_REENQ_REASON_SHIFT +#define HAVE_SCX_TASK_REENQ_REASON_BITS +#define HAVE_SCX_TASK_REENQ_REASON_MASK +#define HAVE_SCX_TASK_REENQ_NONE +#define HAVE_SCX_TASK_REENQ_KFUNC +#define HAVE_SCX_TASK_REENQ_IMMED +#define HAVE_SCX_TASK_REENQ_PREEMPTED +#define HAVE_SCX_TASK_CURSOR +#define HAVE_SCX_ECODE_RSN_HOTPLUG +#define HAVE_SCX_ECODE_RSN_CGROUP_OFFLINE +#define HAVE_SCX_ECODE_ACT_RESTART +#define HAVE_SCX_EFLAG_INITIALIZED +#define HAVE_SCX_EXIT_NONE +#define HAVE_SCX_EXIT_DONE +#define HAVE_SCX_EXIT_UNREG +#define HAVE_SCX_EXIT_UNREG_BPF +#define HAVE_SCX_EXIT_UNREG_KERN +#define HAVE_SCX_EXIT_SYSRQ +#define HAVE_SCX_EXIT_PARENT +#define HAVE_SCX_EXIT_ERROR +#define HAVE_SCX_EXIT_ERROR_BPF +#define HAVE_SCX_EXIT_ERROR_STALL +#define HAVE_SCX_KF_UNLOCKED +#define HAVE_SCX_KF_CPU_RELEASE +#define HAVE_SCX_KF_DISPATCH +#define HAVE_SCX_KF_ENQUEUE +#define HAVE_SCX_KF_SELECT_CPU +#define HAVE_SCX_KF_REST +#define HAVE___SCX_KF_RQ_LOCKED +#define HAVE___SCX_KF_TERMINAL +#define HAVE_SCX_KICK_IDLE +#define HAVE_SCX_KICK_PREEMPT +#define HAVE_SCX_KICK_WAIT +#define HAVE_SCX_OPI_BEGIN +#define HAVE_SCX_OPI_NORMAL_BEGIN +#define HAVE_SCX_OPI_NORMAL_END +#define HAVE_SCX_OPI_CPU_HOTPLUG_BEGIN +#define HAVE_SCX_OPI_CPU_HOTPLUG_END +#define HAVE_SCX_OPI_END +#define HAVE_SCX_OPS_KEEP_BUILTIN_IDLE +#define HAVE_SCX_OPS_ENQ_LAST +#define HAVE_SCX_OPS_ENQ_EXITING +#define HAVE_SCX_OPS_SWITCH_PARTIAL +#define HAVE_SCX_OPS_ENQ_MIGRATION_DISABLED +#define HAVE_SCX_OPS_ALLOW_QUEUED_WAKEUP +#define HAVE_SCX_OPS_BUILTIN_IDLE_PER_NODE +#define HAVE_SCX_OPS_ALWAYS_ENQ_IMMED 
+#define HAVE_SCX_OPS_ALL_FLAGS +#define HAVE___SCX_OPS_INTERNAL_MASK +#define HAVE_SCX_OPS_HAS_CPU_PREEMPT +#define HAVE_SCX_OPSS_NONE +#define HAVE_SCX_OPSS_QUEUEING +#define HAVE_SCX_OPSS_QUEUED +#define HAVE_SCX_OPSS_DISPATCHING +#define HAVE_SCX_OPSS_QSEQ_SHIFT +#define HAVE_SCX_PICK_IDLE_CORE +#define HAVE_SCX_PICK_IDLE_IN_NODE +#define HAVE_SCX_OPS_NAME_LEN +#define HAVE_SCX_SLICE_DFL +#define HAVE_SCX_SLICE_BYPASS +#define HAVE_SCX_SLICE_INF +#define HAVE_SCX_REENQ_ANY +#define HAVE___SCX_REENQ_FILTER_MASK +#define HAVE___SCX_REENQ_USER_MASK +#define HAVE_SCX_REENQ_TSR_RQ_OPEN +#define HAVE_SCX_REENQ_TSR_NOT_FIRST +#define HAVE___SCX_REENQ_TSR_MASK +#define HAVE_SCX_RQ_ONLINE +#define HAVE_SCX_RQ_CAN_STOP_TICK +#define HAVE_SCX_RQ_BAL_KEEP +#define HAVE_SCX_RQ_CLK_VALID +#define HAVE_SCX_RQ_BAL_CB_PENDING +#define HAVE_SCX_RQ_IN_WAKEUP +#define HAVE_SCX_RQ_IN_BALANCE +#define HAVE_SCX_SCHED_PCPU_BYPASSING +#define HAVE_SCX_TG_ONLINE +#define HAVE_SCX_TG_INITED +#define HAVE_SCX_WAKE_FORK +#define HAVE_SCX_WAKE_TTWU +#define HAVE_SCX_WAKE_SYNC + +#endif /* __ENUM_DEFS_AUTOGEN_H__ */ diff --git a/tools/sched_ext/include/scx/enums.autogen.bpf.h b/tools/sched_ext/include/scx/enums.autogen.bpf.h index 0e941a0d6f88..dafccbb6b69d 100644 --- a/tools/sched_ext/include/scx/enums.autogen.bpf.h +++ b/tools/sched_ext/include/scx/enums.autogen.bpf.h @@ -13,6 +13,30 @@ const volatile u64 __SCX_SLICE_DFL __weak; const volatile u64 __SCX_SLICE_INF __weak; #define SCX_SLICE_INF __SCX_SLICE_INF +const volatile u64 __SCX_RQ_ONLINE __weak; +#define SCX_RQ_ONLINE __SCX_RQ_ONLINE + +const volatile u64 __SCX_RQ_CAN_STOP_TICK __weak; +#define SCX_RQ_CAN_STOP_TICK __SCX_RQ_CAN_STOP_TICK + +const volatile u64 __SCX_RQ_BAL_PENDING __weak; +#define SCX_RQ_BAL_PENDING __SCX_RQ_BAL_PENDING + +const volatile u64 __SCX_RQ_BAL_KEEP __weak; +#define SCX_RQ_BAL_KEEP __SCX_RQ_BAL_KEEP + +const volatile u64 __SCX_RQ_BYPASSING __weak; +#define SCX_RQ_BYPASSING __SCX_RQ_BYPASSING + +const 
volatile u64 __SCX_RQ_CLK_VALID __weak; +#define SCX_RQ_CLK_VALID __SCX_RQ_CLK_VALID + +const volatile u64 __SCX_RQ_IN_WAKEUP __weak; +#define SCX_RQ_IN_WAKEUP __SCX_RQ_IN_WAKEUP + +const volatile u64 __SCX_RQ_IN_BALANCE __weak; +#define SCX_RQ_IN_BALANCE __SCX_RQ_IN_BALANCE + const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak; #define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN @@ -43,6 +67,12 @@ const volatile u64 __SCX_TASK_RESET_RUNNABLE_AT __weak; const volatile u64 __SCX_TASK_DEQD_FOR_SLEEP __weak; #define SCX_TASK_DEQD_FOR_SLEEP __SCX_TASK_DEQD_FOR_SLEEP +const volatile u64 __SCX_TASK_SUB_INIT __weak; +#define SCX_TASK_SUB_INIT __SCX_TASK_SUB_INIT + +const volatile u64 __SCX_TASK_IMMED __weak; +#define SCX_TASK_IMMED __SCX_TASK_IMMED + const volatile u64 __SCX_TASK_STATE_SHIFT __weak; #define SCX_TASK_STATE_SHIFT __SCX_TASK_STATE_SHIFT @@ -91,6 +121,9 @@ const volatile u64 __SCX_ENQ_HEAD __weak; const volatile u64 __SCX_ENQ_PREEMPT __weak; #define SCX_ENQ_PREEMPT __SCX_ENQ_PREEMPT +const volatile u64 __SCX_ENQ_IMMED __weak; +#define SCX_ENQ_IMMED __SCX_ENQ_IMMED + const volatile u64 __SCX_ENQ_REENQ __weak; #define SCX_ENQ_REENQ __SCX_ENQ_REENQ @@ -103,3 +136,5 @@ const volatile u64 __SCX_ENQ_CLEAR_OPSS __weak; const volatile u64 __SCX_ENQ_DSQ_PRIQ __weak; #define SCX_ENQ_DSQ_PRIQ __SCX_ENQ_DSQ_PRIQ +const volatile u64 __SCX_DEQ_SCHED_CHANGE __weak; +#define SCX_DEQ_SCHED_CHANGE __SCX_DEQ_SCHED_CHANGE diff --git a/tools/sched_ext/include/scx/enums.autogen.h b/tools/sched_ext/include/scx/enums.autogen.h index 88137a140e72..bbd4901f4fce 100644 --- a/tools/sched_ext/include/scx/enums.autogen.h +++ b/tools/sched_ext/include/scx/enums.autogen.h @@ -8,6 +8,14 @@ SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \ SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \ SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_ONLINE); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CAN_STOP_TICK); \ + SCX_ENUM_SET(skel, 
scx_rq_flags, SCX_RQ_BAL_PENDING); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BAL_KEEP); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_BYPASSING); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_CLK_VALID); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_WAKEUP); \ + SCX_ENUM_SET(skel, scx_rq_flags, SCX_RQ_IN_BALANCE); \ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \ SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \ @@ -18,6 +26,8 @@ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_QUEUED); \ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_RESET_RUNNABLE_AT); \ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_DEQD_FOR_SLEEP); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_SUB_INIT); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_IMMED); \ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_SHIFT); \ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_BITS); \ SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_MASK); \ @@ -34,8 +44,10 @@ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_WAKEUP); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_HEAD); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_PREEMPT); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_IMMED); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_REENQ); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_LAST); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_CLEAR_OPSS); \ SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_DSQ_PRIQ); \ + SCX_ENUM_SET(skel, scx_deq_flags, SCX_DEQ_SCHED_CHANGE); \ } while (0) diff --git a/tools/sched_ext/include/scx/enums.h b/tools/sched_ext/include/scx/enums.h index 34cbebe974b7..c3b09acce824 100644 --- a/tools/sched_ext/include/scx/enums.h +++ b/tools/sched_ext/include/scx/enums.h @@ -9,12 +9,13 @@ #ifndef __SCX_ENUMS_H #define __SCX_ENUMS_H -static inline void __ENUM_set(u64 *val, char *type, char *name) +static inline void __ENUM_set(u64 *val, const char *type, const char *name) { bool res; res = __COMPAT_read_enum(type, name, val); - SCX_BUG_ON(!res, "enum 
not found(%s)", name); + if (!res) + *val = 0; } #define SCX_ENUM_SET(skel, type, name) do { \ diff --git a/tools/sched_ext/include/scx/user_exit_info.bpf.h b/tools/sched_ext/include/scx/user_exit_info.bpf.h new file mode 100644 index 000000000000..e7ac6611a990 --- /dev/null +++ b/tools/sched_ext/include/scx/user_exit_info.bpf.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define struct user_exit_info which is shared between BPF and userspace parts + * to communicate exit status and other information. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ + +#ifndef __USER_EXIT_INFO_BPF_H +#define __USER_EXIT_INFO_BPF_H + +#ifndef LSP +#include "vmlinux.h" +#endif +#include <bpf/bpf_core_read.h> + +#include "user_exit_info_common.h" + +#define UEI_DEFINE(__name) \ + char RESIZABLE_ARRAY(data, __name##_dump); \ + const volatile u32 __name##_dump_len; \ + struct user_exit_info __name SEC(".data") + +#define UEI_RECORD(__uei_name, __ei) ({ \ + bpf_probe_read_kernel_str(__uei_name.reason, \ + sizeof(__uei_name.reason), (__ei)->reason); \ + bpf_probe_read_kernel_str(__uei_name.msg, \ + sizeof(__uei_name.msg), (__ei)->msg); \ + bpf_probe_read_kernel_str(__uei_name##_dump, \ + __uei_name##_dump_len, (__ei)->dump); \ + if (bpf_core_field_exists((__ei)->exit_code)) \ + __uei_name.exit_code = (__ei)->exit_code; \ + /* use __sync to force memory barrier */ \ + __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \ + (__ei)->kind); \ +}) + +#endif /* __USER_EXIT_INFO_BPF_H */ diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h index 66f856640ee7..399697fa372f 100644 --- a/tools/sched_ext/include/scx/user_exit_info.h +++ b/tools/sched_ext/include/scx/user_exit_info.h @@ -10,55 +10,11 @@ #ifndef __USER_EXIT_INFO_H #define __USER_EXIT_INFO_H -#ifdef LSP -#define __bpf__ -#include 
"../vmlinux.h" -#endif - -enum uei_sizes { - UEI_REASON_LEN = 128, - UEI_MSG_LEN = 1024, - UEI_DUMP_DFL_LEN = 32768, -}; - -struct user_exit_info { - int kind; - s64 exit_code; - char reason[UEI_REASON_LEN]; - char msg[UEI_MSG_LEN]; -}; - -#ifdef __bpf__ - -#ifndef LSP -#include "vmlinux.h" -#endif -#include <bpf/bpf_core_read.h> - -#define UEI_DEFINE(__name) \ - char RESIZABLE_ARRAY(data, __name##_dump); \ - const volatile u32 __name##_dump_len; \ - struct user_exit_info __name SEC(".data") - -#define UEI_RECORD(__uei_name, __ei) ({ \ - bpf_probe_read_kernel_str(__uei_name.reason, \ - sizeof(__uei_name.reason), (__ei)->reason); \ - bpf_probe_read_kernel_str(__uei_name.msg, \ - sizeof(__uei_name.msg), (__ei)->msg); \ - bpf_probe_read_kernel_str(__uei_name##_dump, \ - __uei_name##_dump_len, (__ei)->dump); \ - if (bpf_core_field_exists((__ei)->exit_code)) \ - __uei_name.exit_code = (__ei)->exit_code; \ - /* use __sync to force memory barrier */ \ - __sync_val_compare_and_swap(&__uei_name.kind, __uei_name.kind, \ - (__ei)->kind); \ -}) - -#else /* !__bpf__ */ - #include <stdio.h> #include <stdbool.h> +#include "user_exit_info_common.h" + /* no need to call the following explicitly if SCX_OPS_LOAD() is used */ #define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \ u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \ @@ -114,5 +70,4 @@ enum uei_ecode_mask { #define UEI_ECODE_RESTART(__ecode) (UEI_ECODE_SYS_ACT((__ecode)) == SCX_ECODE_ACT_RESTART) -#endif /* __bpf__ */ #endif /* __USER_EXIT_INFO_H */ diff --git a/tools/sched_ext/include/scx/user_exit_info_common.h b/tools/sched_ext/include/scx/user_exit_info_common.h new file mode 100644 index 000000000000..2d0981aedd89 --- /dev/null +++ b/tools/sched_ext/include/scx/user_exit_info_common.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define struct user_exit_info which is shared between BPF and userspace parts + * to communicate exit status and other information. 
+ * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#ifndef __USER_EXIT_INFO_COMMON_H +#define __USER_EXIT_INFO_COMMON_H + +#ifdef LSP +#include "../vmlinux.h" +#endif + +enum uei_sizes { + UEI_REASON_LEN = 128, + UEI_MSG_LEN = 1024, + UEI_DUMP_DFL_LEN = 32768, +}; + +struct user_exit_info { + int kind; + s64 exit_code; + char reason[UEI_REASON_LEN]; + char msg[UEI_MSG_LEN]; +}; + +#endif /* __USER_EXIT_INFO_COMMON_H */ diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c index 50bc1737c167..4efcce099bd5 100644 --- a/tools/sched_ext/scx_central.bpf.c +++ b/tools/sched_ext/scx_central.bpf.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * A central FIFO sched_ext scheduler which demonstrates the followings: + * A central FIFO sched_ext scheduler which demonstrates the following: * * a. Making all scheduling decisions from one CPU: * @@ -60,6 +60,7 @@ const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */ const volatile u64 slice_ns; bool timer_pinned = true; +bool timer_started; u64 nr_total, nr_locals, nr_queued, nr_lost_pids; u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries; u64 nr_overflows; @@ -179,9 +180,47 @@ static bool dispatch_to_cpu(s32 cpu) return false; } +static void start_central_timer(void) +{ + struct bpf_timer *timer; + u32 key = 0; + int ret; + + if (likely(timer_started)) + return; + + timer = bpf_map_lookup_elem(¢ral_timer, &key); + if (!timer) { + scx_bpf_error("failed to lookup central timer"); + return; + } + + ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN); + /* + * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a + * kernel which doesn't have it, bpf_timer_start() will return -EINVAL. + * Retry without the PIN. 
This would be the perfect use case for + * bpf_core_enum_value_exists() but the enum type doesn't have a name + * and can't be used with bpf_core_enum_value_exists(). Oh well... + */ + if (ret == -EINVAL) { + timer_pinned = false; + ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); + } + + if (ret) { + scx_bpf_error("bpf_timer_start failed (%d)", ret); + return; + } + + timer_started = true; +} + void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) { if (cpu == central_cpu) { + start_central_timer(); + /* dispatch for all other CPUs first */ __sync_fetch_and_add(&nr_dispatches, 1); @@ -214,13 +253,13 @@ void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) } /* look for a task to run on the central CPU */ - if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID)) + if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID, 0)) return; dispatch_to_cpu(central_cpu); } else { bool *gimme; - if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID)) + if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID, 0)) return; gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids); @@ -301,36 +340,21 @@ int BPF_STRUCT_OPS_SLEEPABLE(central_init) int ret; ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); - if (ret) + if (ret) { + scx_bpf_error("scx_bpf_create_dsq failed (%d)", ret); return ret; + } timer = bpf_map_lookup_elem(¢ral_timer, &key); if (!timer) return -ESRCH; - if (bpf_get_smp_processor_id() != central_cpu) { - scx_bpf_error("init from non-central CPU"); - return -EINVAL; - } - bpf_timer_init(timer, ¢ral_timer, CLOCK_MONOTONIC); bpf_timer_set_callback(timer, central_timerfn); - ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN); - /* - * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a - * kernel which doesn't have it, bpf_timer_start() will return -EINVAL. - * Retry without the PIN. 
This would be the perfect use case for - * bpf_core_enum_value_exists() but the enum type doesn't have a name - * and can't be used with bpf_core_enum_value_exists(). Oh well... - */ - if (ret == -EINVAL) { - timer_pinned = false; - ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); - } - if (ret) - scx_bpf_error("bpf_timer_start failed (%d)", ret); - return ret; + scx_bpf_kick_cpu(central_cpu, 0); + + return 0; } void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei) diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index 1e9f74525d8f..4a72df39500d 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -5,11 +5,11 @@ * Copyright (c) 2022 David Vernet <dvernet@meta.com> */ #define _GNU_SOURCE -#include <sched.h> #include <stdio.h> #include <unistd.h> #include <inttypes.h> #include <signal.h> +#include <assert.h> #include <libgen.h> #include <bpf/bpf.h> #include <scx/common.h> @@ -20,7 +20,7 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-c CPU]\n" +"Usage: %s [-s SLICE_US] [-c CPU] [-v]\n" "\n" " -s SLICE_US Override slice duration\n" " -c CPU Override the central CPU (default: 0)\n" @@ -48,26 +48,36 @@ int main(int argc, char **argv) struct bpf_link *link; __u64 seq = 0, ecode; __s32 opt; - cpu_set_t *cpuset; libbpf_set_print(libbpf_print_fn); signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(central_ops, scx_central); skel->rodata->central_cpu = 0; skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); - while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) { + assert(skel->rodata->nr_cpu_ids > 0); + assert(skel->rodata->nr_cpu_ids <= INT32_MAX); + + while ((opt = getopt(argc, argv, "s:c:vh")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; break; - 
case 'c': - skel->rodata->central_cpu = strtoul(optarg, NULL, 0); + case 'c': { + u32 central_cpu = strtoul(optarg, NULL, 0); + if (central_cpu >= skel->rodata->nr_cpu_ids) { + fprintf(stderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids); + scx_central__destroy(skel); + return -1; + } + skel->rodata->central_cpu = (s32)central_cpu; break; + } case 'v': verbose = true; break; @@ -83,26 +93,6 @@ restart: SCX_OPS_LOAD(skel, central_ops, scx_central, uei); - /* - * Affinitize the loading thread to the central CPU, as: - * - That's where the BPF timer is first invoked in the BPF program. - * - We probably don't want this user space component to take up a core - * from a task that would benefit from avoiding preemption on one of - * the tickless cores. - * - * Until BPF supports pinning the timer, it's not guaranteed that it - * will always be invoked on the central CPU. In practice, this - * suffices the majority of the time. - */ - cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids); - SCX_BUG_ON(!cpuset, "Failed to allocate cpuset"); - CPU_ZERO(cpuset); - CPU_SET(skel->rodata->central_cpu, cpuset); - SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset), - "Failed to affinitize to central CPU %d (max %d)", - skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1); - CPU_FREE(cpuset); - link = SCX_OPS_ATTACH(skel, central_ops, scx_central); if (!skel->data->timer_pinned) diff --git a/tools/sched_ext/scx_cpu0.bpf.c b/tools/sched_ext/scx_cpu0.bpf.c new file mode 100644 index 000000000000..0b1a7ce879b0 --- /dev/null +++ b/tools/sched_ext/scx_cpu0.bpf.c @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A CPU0 scheduler. + * + * This scheduler queues all tasks to a shared DSQ and only dispatches them on + * CPU0 in FIFO order. This is useful for testing bypass behavior when many + * tasks are concentrated on a single CPU. 
If the load balancer doesn't work, + * bypass mode can trigger task hangs or RCU stalls as the queue is long and + * there's only one CPU working on it. + * + * - Statistics tracking how many tasks are queued to local and CPU0 DSQs. + * - Termination notification for userspace. + * + * Copyright (c) 2025 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2025 Tejun Heo <tj@kernel.org> + */ +#include <scx/common.bpf.h> + +char _license[] SEC("license") = "GPL"; + +const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */ + +UEI_DEFINE(uei); + +/* + * We create a custom DSQ with ID 0 that we dispatch to and consume from on + * CPU0. + */ +#define DSQ_CPU0 0 + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); + __uint(max_entries, 2); /* [local, cpu0] */ +} stats SEC(".maps"); + +static void stat_inc(u32 idx) +{ + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx); + if (cnt_p) + (*cnt_p)++; +} + +s32 BPF_STRUCT_OPS(cpu0_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) +{ + return 0; +} + +void BPF_STRUCT_OPS(cpu0_enqueue, struct task_struct *p, u64 enq_flags) +{ + /* + * select_cpu() always picks CPU0. If @p is not on CPU0, it can't run on + * CPU 0. Queue on whichever CPU it's currently only. 
+ */ + if (scx_bpf_task_cpu(p) != 0) { + stat_inc(0); /* count local queueing */ + scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0); + return; + } + + stat_inc(1); /* count cpu0 queueing */ + scx_bpf_dsq_insert(p, DSQ_CPU0, SCX_SLICE_DFL, enq_flags); +} + +void BPF_STRUCT_OPS(cpu0_dispatch, s32 cpu, struct task_struct *prev) +{ + if (cpu == 0) + scx_bpf_dsq_move_to_local(DSQ_CPU0, 0); +} + +s32 BPF_STRUCT_OPS_SLEEPABLE(cpu0_init) +{ + int ret; + + ret = scx_bpf_create_dsq(DSQ_CPU0, -1); + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", DSQ_CPU0, ret); + return ret; + } + + return 0; +} + +void BPF_STRUCT_OPS(cpu0_exit, struct scx_exit_info *ei) +{ + UEI_RECORD(uei, ei); +} + +SCX_OPS_DEFINE(cpu0_ops, + .select_cpu = (void *)cpu0_select_cpu, + .enqueue = (void *)cpu0_enqueue, + .dispatch = (void *)cpu0_dispatch, + .init = (void *)cpu0_init, + .exit = (void *)cpu0_exit, + .name = "cpu0"); diff --git a/tools/sched_ext/scx_cpu0.c b/tools/sched_ext/scx_cpu0.c new file mode 100644 index 000000000000..a6fba9978b9c --- /dev/null +++ b/tools/sched_ext/scx_cpu0.c @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2025 Tejun Heo <tj@kernel.org> + */ +#include <stdio.h> +#include <unistd.h> +#include <signal.h> +#include <assert.h> +#include <libgen.h> +#include <bpf/bpf.h> +#include <scx/common.h> +#include "scx_cpu0.bpf.skel.h" + +const char help_fmt[] = +"A cpu0 sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-v]\n" +"\n" +" -v Print libbpf debug messages\n" +" -h Display this help and exit\n"; + +static bool verbose; +static volatile int exit_req; + +static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static void sigint_handler(int sig) +{ + exit_req = 1; +} + +static void read_stats(struct scx_cpu0 *skel, __u64 *stats) +{ + int nr_cpus = libbpf_num_possible_cpus(); + assert(nr_cpus > 0); + __u64 cnts[2][nr_cpus]; + __u32 idx; + + memset(stats, 0, sizeof(stats[0]) * 2); + + for (idx = 0; idx < 2; idx++) { + int ret, cpu; + + ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), + &idx, cnts[idx]); + if (ret < 0) + continue; + for (cpu = 0; cpu < nr_cpus; cpu++) + stats[idx] += cnts[idx][cpu]; + } +} + +int main(int argc, char **argv) +{ + struct scx_cpu0 *skel; + struct bpf_link *link; + __u32 opt; + __u64 ecode; + + libbpf_set_print(libbpf_print_fn); + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); +restart: + optind = 1; + skel = SCX_OPS_OPEN(cpu0_ops, scx_cpu0); + + skel->rodata->nr_cpus = libbpf_num_possible_cpus(); + + while ((opt = getopt(argc, argv, "vh")) != -1) { + switch (opt) { + case 'v': + verbose = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + SCX_OPS_LOAD(skel, cpu0_ops, scx_cpu0, uei); + link = SCX_OPS_ATTACH(skel, cpu0_ops, scx_cpu0); + + while (!exit_req && !UEI_EXITED(skel, uei)) { + __u64 stats[2]; + + read_stats(skel, stats); + printf("local=%llu 
cpu0=%llu\n", stats[0], stats[1]); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + ecode = UEI_REPORT(skel, uei); + scx_cpu0__destroy(skel); + + if (UEI_ECODE_RESTART(ecode)) + goto restart; + return 0; +} diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c index 2c720e3ecad5..fec359581826 100644 --- a/tools/sched_ext/scx_flatcg.bpf.c +++ b/tools/sched_ext/scx_flatcg.bpf.c @@ -18,7 +18,7 @@ * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's * share in that competition is 100/(200+100) == 1/3. B's eventual share in the * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's - * eventual shaer is the same at 1/6. D is only competing at the top level and + * eventual share is the same at 1/6. D is only competing at the top level and * its share is 200/(100+200) == 2/3. * * So, instead of hierarchically scheduling level-by-level, we can consider it @@ -382,7 +382,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags) return; } - cgrp = __COMPAT_scx_bpf_task_cgroup(p); + cgrp = scx_bpf_task_cgroup(p); cgc = find_cgrp_ctx(cgrp); if (!cgc) goto out_release; @@ -508,7 +508,7 @@ void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags) { struct cgroup *cgrp; - cgrp = __COMPAT_scx_bpf_task_cgroup(p); + cgrp = scx_bpf_task_cgroup(p); update_active_weight_sums(cgrp, true); bpf_cgroup_release(cgrp); } @@ -521,7 +521,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p) if (fifo_sched) return; - cgrp = __COMPAT_scx_bpf_task_cgroup(p); + cgrp = scx_bpf_task_cgroup(p); cgc = find_cgrp_ctx(cgrp); if (cgc) { /* @@ -551,9 +551,11 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) * too much, determine the execution time by taking explicit timestamps * instead of depending on @p->scx.slice. 
*/ - if (!fifo_sched) - p->scx.dsq_vtime += - (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; + if (!fifo_sched) { + u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice); + + scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta); + } taskc = bpf_task_storage_get(&task_ctx, p, 0, 0); if (!taskc) { @@ -564,7 +566,7 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable) if (!taskc->bypassed_at) return; - cgrp = __COMPAT_scx_bpf_task_cgroup(p); + cgrp = scx_bpf_task_cgroup(p); cgc = find_cgrp_ctx(cgrp); if (cgc) { __sync_fetch_and_add(&cgc->cvtime_delta, @@ -578,7 +580,7 @@ void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags) { struct cgroup *cgrp; - cgrp = __COMPAT_scx_bpf_task_cgroup(p); + cgrp = scx_bpf_task_cgroup(p); update_active_weight_sums(cgrp, false); bpf_cgroup_release(cgrp); } @@ -660,7 +662,7 @@ static bool try_pick_next_cgroup(u64 *cgidp) goto out_free; } - if (!scx_bpf_dsq_move_to_local(cgid)) { + if (!scx_bpf_dsq_move_to_local(cgid, 0)) { bpf_cgroup_release(cgrp); stat_inc(FCG_STAT_PNC_EMPTY); goto out_stash; @@ -740,7 +742,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) goto pick_next_cgroup; if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) { - if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) { + if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid, 0)) { stat_inc(FCG_STAT_CNS_KEEP); return; } @@ -780,7 +782,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) pick_next_cgroup: cpuc->cur_at = now; - if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) { + if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ, 0)) { cpuc->cur_cgid = 0; return; } @@ -822,7 +824,7 @@ s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p, if (!(cgc = find_cgrp_ctx(args->cgroup))) return -ENOENT; - p->scx.dsq_vtime = cgc->tvtime_now; + scx_bpf_task_set_dsq_vtime(p, cgc->tvtime_now); return 0; } @@ -842,8 +844,10 @@ int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp, * 
unlikely case that it breaks. */ ret = scx_bpf_create_dsq(cgid, -1); - if (ret) + if (ret) { + scx_bpf_error("scx_bpf_create_dsq failed (%d)", ret); return ret; + } cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); @@ -917,17 +921,25 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p, struct fcg_cgrp_ctx *from_cgc, *to_cgc; s64 delta; - /* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */ + /* find_cgrp_ctx() triggers scx_bpf_error() on lookup failures */ if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to))) return; delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now); - p->scx.dsq_vtime = to_cgc->tvtime_now + delta; + scx_bpf_task_set_dsq_vtime(p, to_cgc->tvtime_now + delta); } s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init) { - return scx_bpf_create_dsq(FALLBACK_DSQ, -1); + int ret; + + ret = scx_bpf_create_dsq(FALLBACK_DSQ, -1); + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", FALLBACK_DSQ, ret); + return ret; + } + + return 0; } void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei) @@ -950,5 +962,5 @@ SCX_OPS_DEFINE(flatcg_ops, .cgroup_move = (void *)fcg_cgroup_move, .init = (void *)fcg_init, .exit = (void *)fcg_exit, - .flags = SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING, + .flags = SCX_OPS_ENQ_EXITING, .name = "flatcg"); diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c index 6dd423eeb4ff..d865c381589b 100644 --- a/tools/sched_ext/scx_flatcg.c +++ b/tools/sched_ext/scx_flatcg.c @@ -6,6 +6,7 @@ */ #include <stdio.h> #include <signal.h> +#include <assert.h> #include <unistd.h> #include <libgen.h> #include <limits.h> @@ -101,21 +102,27 @@ static float read_cpu_util(__u64 *last_sum, __u64 *last_idle) static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats) { - __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus]; + __u64 *cnts; __u32 idx; + cnts = calloc(skel->rodata->nr_cpus, sizeof(__u64)); + if (!cnts) + return; + memset(stats, 0, sizeof(stats[0]) 
* FCG_NR_STATS); for (idx = 0; idx < FCG_NR_STATS; idx++) { int ret, cpu; ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), - &idx, cnts[idx]); + &idx, cnts); if (ret < 0) continue; for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++) - stats[idx] += cnts[idx][cpu]; + stats[idx] += cnts[cpu]; } + + free(cnts); } int main(int argc, char **argv) @@ -134,9 +141,11 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg); skel->rodata->nr_cpus = libbpf_num_possible_cpus(); + assert(skel->rodata->nr_cpus > 0); skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) { diff --git a/tools/sched_ext/scx_pair.bpf.c b/tools/sched_ext/scx_pair.bpf.c new file mode 100644 index 000000000000..267011b57cba --- /dev/null +++ b/tools/sched_ext/scx_pair.bpf.c @@ -0,0 +1,610 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext core-scheduler which always makes every sibling CPU pair + * execute from the same CPU cgroup. + * + * This scheduler is a minimal implementation and would need some form of + * priority handling both inside each cgroup and across the cgroups to be + * practically useful. + * + * Each CPU in the system is paired with exactly one other CPU, according to a + * "stride" value that can be specified when the BPF scheduler program is first + * loaded. Throughout the runtime of the scheduler, these CPU pairs guarantee + * that they will only ever schedule tasks that belong to the same CPU cgroup. + * + * Scheduler Initialization + * ------------------------ + * + * The scheduler BPF program is first initialized from user space, before it is + * enabled. During this initialization process, each CPU on the system is + * assigned several values that are constant throughout its runtime: + * + * 1. 
*Pair CPU*: The CPU that it synchronizes with when making scheduling + * decisions. Paired CPUs always schedule tasks from the same + * CPU cgroup, and synchronize with each other to guarantee + * that this constraint is not violated. + * 2. *Pair ID*: Each CPU pair is assigned a Pair ID, which is used to access + * a struct pair_ctx object that is shared between the pair. + * 3. *In-pair-index*: An index, 0 or 1, that is assigned to each core in the + * pair. Each struct pair_ctx has an active_mask field, + * which is a bitmap used to indicate whether each core + * in the pair currently has an actively running task. + * This index specifies which entry in the bitmap corresponds + * to each CPU in the pair. + * + * During this initialization, the CPUs are paired according to a "stride" that + * may be specified when invoking the user space program that initializes and + * loads the scheduler. By default, the stride is 1/2 the total number of CPUs. + * + * Tasks and cgroups + * ----------------- + * + * Every cgroup in the system is registered with the scheduler using the + * pair_cgroup_init() callback, and every task in the system is associated with + * exactly one cgroup. At a high level, the idea with the pair scheduler is to + * always schedule tasks from the same cgroup within a given CPU pair. When a + * task is enqueued (i.e. passed to the pair_enqueue() callback function), its + * cgroup ID is read from its task struct, and then a corresponding queue map + * is used to FIFO-enqueue the task for that cgroup. + * + * If you look through the implementation of the scheduler, you'll notice that + * there is quite a bit of complexity involved with looking up the per-cgroup + * FIFO queue that we enqueue tasks in. For example, there is a cgrp_q_idx_hash + * BPF hash map that is used to map a cgroup ID to a globally unique ID that's + * allocated in the BPF program. 
This is done because we use separate maps to + * store the FIFO queue of tasks, and the length of that map, per cgroup. This + * complexity is only present because of current deficiencies in BPF that will + * soon be addressed. The main point to keep in mind is that newly enqueued + * tasks are added to their cgroup's FIFO queue. + * + * Dispatching tasks + * ----------------- + * + * This section will describe how enqueued tasks are dispatched and scheduled. + * Tasks are dispatched in pair_dispatch(), and at a high level the workflow is + * as follows: + * + * 1. Fetch the struct pair_ctx for the current CPU. As mentioned above, this is + * the structure that's used to synchronize amongst the two pair CPUs in their + * scheduling decisions. After any of the following events have occurred: + * + * - The cgroup's slice run has expired, or + * - The cgroup becomes empty, or + * - Either CPU in the pair is preempted by a higher priority scheduling class + * + * The cgroup transitions to the draining state and stops executing new tasks + * from the cgroup. + * + * 2. If the pair is still executing a task, mark the pair_ctx as draining, and + * wait for the pair CPU to be preempted. + * + * 3. Otherwise, if the pair CPU is not running a task, we can move onto + * scheduling new tasks. Pop the next cgroup id from the top_q queue. + * + * 4. Pop a task from that cgroup's FIFO task queue, and begin executing it. + * + * Note again that this scheduling behavior is simple, but the implementation + * is complex mostly because this it hits several BPF shortcomings and has to + * work around in often awkward ways. Most of the shortcomings are expected to + * be resolved in the near future which should allow greatly simplifying this + * scheduler. + * + * Dealing with preemption + * ----------------------- + * + * SCX is the lowest priority sched_class, and could be preempted by them at + * any time. 
To address this, the scheduler implements pair_cpu_release() and + * pair_cpu_acquire() callbacks which are invoked by the core scheduler when + * the scheduler loses and gains control of the CPU respectively. + * + * In pair_cpu_release(), we mark the pair_ctx as having been preempted, and + * then invoke: + * + * scx_bpf_kick_cpu(pair_cpu, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + * + * This preempts the pair CPU, and waits until it has re-entered the scheduler + * before returning. This is necessary to ensure that the higher priority + * sched_class that preempted our scheduler does not schedule a task + * concurrently with our pair CPU. + * + * When the CPU is re-acquired in pair_cpu_acquire(), we unmark the preemption + * in the pair_ctx, and send another resched IPI to the pair CPU to re-enable + * pair scheduling. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#include <scx/common.bpf.h> +#include "scx_pair.h" + +char _license[] SEC("license") = "GPL"; + +/* !0 for veristat, set during init */ +const volatile u32 nr_cpu_ids = 1; + +/* a pair of CPUs stay on a cgroup for this duration */ +const volatile u32 pair_batch_dur_ns; + +/* cpu ID -> pair cpu ID */ +const volatile s32 RESIZABLE_ARRAY(rodata, pair_cpu); + +/* cpu ID -> pair_id */ +const volatile u32 RESIZABLE_ARRAY(rodata, pair_id); + +/* CPU ID -> CPU # in the pair (0 or 1) */ +const volatile u32 RESIZABLE_ARRAY(rodata, in_pair_idx); + +struct pair_ctx { + struct bpf_spin_lock lock; + + /* the cgroup the pair is currently executing */ + u64 cgid; + + /* the pair started executing the current cgroup at */ + u64 started_at; + + /* whether the current cgroup is draining */ + bool draining; + + /* the CPUs that are currently active on the cgroup */ + u32 active_mask; + + /* + * the CPUs that are currently preempted and running tasks in a + * different scheduler. 
+ */ + u32 preempted_mask; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, struct pair_ctx); +} pair_ctx SEC(".maps"); + +/* queue of cgrp_q's possibly with tasks on them */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + /* + * Because it's difficult to build strong synchronization encompassing + * multiple non-trivial operations in BPF, this queue is managed in an + * opportunistic way so that we guarantee that a cgroup w/ active tasks + * is always on it but possibly multiple times. Once we have more robust + * synchronization constructs and e.g. linked list, we should be able to + * do this in a prettier way but for now just size it big enough. + */ + __uint(max_entries, 4 * MAX_CGRPS); + __type(value, u64); +} top_q SEC(".maps"); + +/* per-cgroup q which FIFOs the tasks from the cgroup */ +struct cgrp_q { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_QUEUED); + __type(value, u32); +}; + +/* + * Ideally, we want to allocate cgrp_q and cgrq_q_len in the cgroup local + * storage; however, a cgroup local storage can only be accessed from the BPF + * progs attached to the cgroup. For now, work around by allocating array of + * cgrp_q's and then allocating per-cgroup indices. + * + * Another caveat: It's difficult to populate a large array of maps statically + * or from BPF. Initialize it from userland. + */ +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_CGRPS); + __type(key, s32); + __array(values, struct cgrp_q); +} cgrp_q_arr SEC(".maps"); + +static u64 cgrp_q_len[MAX_CGRPS]; + +/* + * This and cgrp_q_idx_hash combine into a poor man's IDR. This likely would be + * useful to have as a map type. + */ +static u32 cgrp_q_idx_cursor; +static u64 cgrp_q_idx_busy[MAX_CGRPS]; + +/* + * All added up, the following is what we do: + * + * 1. When a cgroup is enabled, RR cgroup_q_idx_busy array doing cmpxchg looking + * for a free ID. If not found, fail cgroup creation with -EBUSY. 
+ * + * 2. Hash the cgroup ID to the allocated cgrp_q_idx in the following + * cgrp_q_idx_hash. + * + * 3. Whenever a cgrp_q needs to be accessed, first look up the cgrp_q_idx from + * cgrp_q_idx_hash and then access the corresponding entry in cgrp_q_arr. + * + * This is sadly complicated for something pretty simple. Hopefully, we should + * be able to simplify in the future. + */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_CGRPS); + __uint(key_size, sizeof(u64)); /* cgrp ID */ + __uint(value_size, sizeof(s32)); /* cgrp_q idx */ +} cgrp_q_idx_hash SEC(".maps"); + +/* statistics */ +u64 nr_total, nr_dispatched, nr_missing, nr_kicks, nr_preemptions; +u64 nr_exps, nr_exp_waits, nr_exp_empty; +u64 nr_cgrp_next, nr_cgrp_coll, nr_cgrp_empty; + +UEI_DEFINE(uei); + +void BPF_STRUCT_OPS(pair_enqueue, struct task_struct *p, u64 enq_flags) +{ + struct cgroup *cgrp; + struct cgrp_q *cgq; + s32 pid = p->pid; + u64 cgid; + u32 *q_idx; + u64 *cgq_len; + + __sync_fetch_and_add(&nr_total, 1); + + cgrp = scx_bpf_task_cgroup(p); + cgid = cgrp->kn->id; + bpf_cgroup_release(cgrp); + + /* find the cgroup's q and push @p into it */ + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!q_idx) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return; + } + + cgq = bpf_map_lookup_elem(&cgrp_q_arr, q_idx); + if (!cgq) { + scx_bpf_error("failed to lookup q_arr for cgroup[%llu] q_idx[%u]", + cgid, *q_idx); + return; + } + + if (bpf_map_push_elem(cgq, &pid, 0)) { + scx_bpf_error("cgroup[%llu] queue overflow", cgid); + return; + } + + /* bump q len, if going 0 -> 1, queue cgroup into the top_q */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len) { + scx_bpf_error("MEMBER_VTPR malfunction"); + return; + } + + if (!__sync_fetch_and_add(cgq_len, 1) && + bpf_map_push_elem(&top_q, &cgid, 0)) { + scx_bpf_error("top_q overflow"); + return; + } +} + +static int lookup_pairc_and_mask(s32 cpu, struct pair_ctx **pairc, u32 *mask) +{ + u32 
*vptr; + + vptr = (u32 *)ARRAY_ELEM_PTR(pair_id, cpu, nr_cpu_ids); + if (!vptr) + return -EINVAL; + + *pairc = bpf_map_lookup_elem(&pair_ctx, vptr); + if (!(*pairc)) + return -EINVAL; + + vptr = (u32 *)ARRAY_ELEM_PTR(in_pair_idx, cpu, nr_cpu_ids); + if (!vptr) + return -EINVAL; + + *mask = 1U << *vptr; + + return 0; +} + +__attribute__((noinline)) +static int try_dispatch(s32 cpu) +{ + struct pair_ctx *pairc; + struct bpf_map *cgq_map; + struct task_struct *p; + u64 now = scx_bpf_now(); + bool kick_pair = false; + bool expired, pair_preempted; + u32 *vptr, in_pair_mask; + s32 pid, q_idx; + u64 cgid; + int ret; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) { + scx_bpf_error("failed to lookup pairc and in_pair_mask for cpu[%d]", + cpu); + return -ENOENT; + } + + bpf_spin_lock(&pairc->lock); + pairc->active_mask &= ~in_pair_mask; + + expired = time_before(pairc->started_at + pair_batch_dur_ns, now); + if (expired || pairc->draining) { + u64 new_cgid = 0; + + __sync_fetch_and_add(&nr_exps, 1); + + /* + * We're done with the current cgid. An obvious optimization + * would be not draining if the next cgroup is the current one. + * For now, be dumb and always expire. + */ + pairc->draining = true; + + pair_preempted = pairc->preempted_mask; + if (pairc->active_mask || pair_preempted) { + /* + * The other CPU is still active, or is no longer under + * our control due to e.g. being preempted by a higher + * priority sched_class. We want to wait until this + * cgroup expires, or until control of our pair CPU has + * been returned to us. + * + * If the pair controls its CPU, and the time already + * expired, kick. When the other CPU arrives at + * dispatch and clears its active mask, it'll push the + * pair to the next cgroup and kick this CPU. 
+ */ + __sync_fetch_and_add(&nr_exp_waits, 1); + bpf_spin_unlock(&pairc->lock); + if (expired && !pair_preempted) + kick_pair = true; + goto out_maybe_kick; + } + + bpf_spin_unlock(&pairc->lock); + + /* + * Pick the next cgroup. It'd be easier / cleaner to not drop + * pairc->lock and use stronger synchronization here especially + * given that we'll be switching cgroups significantly less + * frequently than tasks. Unfortunately, bpf_spin_lock can't + * really protect anything non-trivial. Let's do opportunistic + * operations instead. + */ + bpf_repeat(BPF_MAX_LOOPS) { + u32 *q_idx; + u64 *cgq_len; + + if (bpf_map_pop_elem(&top_q, &new_cgid)) { + /* no active cgroup, go idle */ + __sync_fetch_and_add(&nr_exp_empty, 1); + return 0; + } + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &new_cgid); + if (!q_idx) + continue; + + /* + * This is the only place where empty cgroups are taken + * off the top_q. + */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len || !*cgq_len) + continue; + + /* + * If it has any tasks, requeue as we may race and not + * execute it. + */ + bpf_map_push_elem(&top_q, &new_cgid, 0); + break; + } + + bpf_spin_lock(&pairc->lock); + + /* + * The other CPU may already have started on a new cgroup while + * we dropped the lock. Make sure that we're still draining and + * start on the new cgroup. 
+ */ + if (pairc->draining && !pairc->active_mask) { + __sync_fetch_and_add(&nr_cgrp_next, 1); + pairc->cgid = new_cgid; + pairc->started_at = now; + pairc->draining = false; + kick_pair = true; + } else { + __sync_fetch_and_add(&nr_cgrp_coll, 1); + } + } + + cgid = pairc->cgid; + pairc->active_mask |= in_pair_mask; + bpf_spin_unlock(&pairc->lock); + + /* again, it'd be better to do all these with the lock held, oh well */ + vptr = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!vptr) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return -ENOENT; + } + q_idx = *vptr; + + /* claim one task from cgrp_q w/ q_idx */ + bpf_repeat(BPF_MAX_LOOPS) { + u64 *cgq_len, len; + + cgq_len = MEMBER_VPTR(cgrp_q_len, [q_idx]); + if (!cgq_len || !(len = *(volatile u64 *)cgq_len)) { + /* the cgroup must be empty, expire and repeat */ + __sync_fetch_and_add(&nr_cgrp_empty, 1); + bpf_spin_lock(&pairc->lock); + pairc->draining = true; + pairc->active_mask &= ~in_pair_mask; + bpf_spin_unlock(&pairc->lock); + return -EAGAIN; + } + + if (__sync_val_compare_and_swap(cgq_len, len, len - 1) != len) + continue; + + break; + } + + cgq_map = bpf_map_lookup_elem(&cgrp_q_arr, &q_idx); + if (!cgq_map) { + scx_bpf_error("failed to lookup cgq_map for cgroup[%llu] q_idx[%d]", + cgid, q_idx); + return -ENOENT; + } + + if (bpf_map_pop_elem(cgq_map, &pid)) { + scx_bpf_error("cgq_map is empty for cgroup[%llu] q_idx[%d]", + cgid, q_idx); + return -ENOENT; + } + + p = bpf_task_from_pid(pid); + if (p) { + __sync_fetch_and_add(&nr_dispatched, 1); + scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } else { + /* we don't handle dequeues, retry on lost tasks */ + __sync_fetch_and_add(&nr_missing, 1); + return -EAGAIN; + } + +out_maybe_kick: + if (kick_pair) { + s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids); + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } + return 0; +} + +void 
BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev) +{ + bpf_repeat(BPF_MAX_LOOPS) { + if (try_dispatch(cpu) != -EAGAIN) + break; + } +} + +void BPF_STRUCT_OPS(pair_cpu_acquire, s32 cpu, struct scx_cpu_acquire_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask &= ~in_pair_mask; + /* Kick the pair CPU, unless it was also preempted. */ + kick_pair = !pairc->preempted_mask; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } +} + +void BPF_STRUCT_OPS(pair_cpu_release, s32 cpu, struct scx_cpu_release_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask |= in_pair_mask; + pairc->active_mask &= ~in_pair_mask; + /* Kick the pair CPU if it's still running. 
*/ + kick_pair = pairc->active_mask; + pairc->draining = true; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)ARRAY_ELEM_PTR(pair_cpu, cpu, nr_cpu_ids); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + } + } + __sync_fetch_and_add(&nr_preemptions, 1); +} + +s32 BPF_STRUCT_OPS(pair_cgroup_init, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 i, q_idx; + + bpf_for(i, 0, MAX_CGRPS) { + q_idx = __sync_fetch_and_add(&cgrp_q_idx_cursor, 1) % MAX_CGRPS; + if (!__sync_val_compare_and_swap(&cgrp_q_idx_busy[q_idx], 0, 1)) + break; + } + if (i == MAX_CGRPS) + return -EBUSY; + + if (bpf_map_update_elem(&cgrp_q_idx_hash, &cgid, &q_idx, BPF_ANY)) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [q_idx]); + if (busy) + *busy = 0; + return -EBUSY; + } + + return 0; +} + +void BPF_STRUCT_OPS(pair_cgroup_exit, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 *q_idx; + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (q_idx) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [*q_idx]); + if (busy) + *busy = 0; + bpf_map_delete_elem(&cgrp_q_idx_hash, &cgid); + } +} + +void BPF_STRUCT_OPS(pair_exit, struct scx_exit_info *ei) +{ + UEI_RECORD(uei, ei); +} + +SCX_OPS_DEFINE(pair_ops, + .enqueue = (void *)pair_enqueue, + .dispatch = (void *)pair_dispatch, + .cpu_acquire = (void *)pair_cpu_acquire, + .cpu_release = (void *)pair_cpu_release, + .cgroup_init = (void *)pair_cgroup_init, + .cgroup_exit = (void *)pair_cgroup_exit, + .exit = (void *)pair_exit, + .name = "pair"); diff --git a/tools/sched_ext/scx_pair.c b/tools/sched_ext/scx_pair.c new file mode 100644 index 000000000000..41b136d43a55 --- /dev/null +++ b/tools/sched_ext/scx_pair.c @@ -0,0 +1,196 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#include <stdio.h> +#include <unistd.h> +#include <inttypes.h> +#include <signal.h> +#include <assert.h> +#include <libgen.h> +#include <bpf/bpf.h> +#include <scx/common.h> +#include "scx_pair.h" +#include "scx_pair.bpf.skel.h" + +const char help_fmt[] = +"A demo sched_ext core-scheduler which always makes every sibling CPU pair\n" +"execute from the same CPU cgroup.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-S STRIDE] [-v]\n" +"\n" +" -S STRIDE Override CPU pair stride (default: nr_cpus_ids / 2)\n" +" -v Print libbpf debug messages\n" +" -h Display this help and exit\n"; + +static bool verbose; +static volatile int exit_req; + +static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_pair *skel; + struct bpf_link *link; + __u64 seq = 0, ecode; + __s32 stride, i, opt, outer_fd; + __u32 pair_id = 0; + + libbpf_set_print(libbpf_print_fn); + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); +restart: + optind = 1; + skel = SCX_OPS_OPEN(pair_ops, scx_pair); + + skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + skel->rodata->pair_batch_dur_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); + + /* pair up the earlier half to the latter by default, override with -s */ + stride = skel->rodata->nr_cpu_ids / 2; + + while ((opt = getopt(argc, argv, "S:vh")) != -1) { + switch (opt) { + case 'S': + stride = strtoul(optarg, NULL, 0); + break; + case 'v': + verbose = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + /* Stride must be positive to pair distinct CPUs. 
*/ + if (stride <= 0) { + fprintf(stderr, "Invalid stride %d, must be positive\n", stride); + scx_pair__destroy(skel); + return -1; + } + + if (skel->rodata->nr_cpu_ids & 1) { + fprintf(stderr, "scx_pair requires an even CPU count, got %u\n", + skel->rodata->nr_cpu_ids); + scx_pair__destroy(skel); + return -1; + } + + bpf_map__set_max_entries(skel->maps.pair_ctx, skel->rodata->nr_cpu_ids / 2); + + /* Resize arrays so their element count is equal to cpu count. */ + RESIZE_ARRAY(skel, rodata, pair_cpu, skel->rodata->nr_cpu_ids); + RESIZE_ARRAY(skel, rodata, pair_id, skel->rodata->nr_cpu_ids); + RESIZE_ARRAY(skel, rodata, in_pair_idx, skel->rodata->nr_cpu_ids); + + for (i = 0; i < skel->rodata->nr_cpu_ids; i++) + skel->rodata_pair_cpu->pair_cpu[i] = -1; + + printf("Pairs: "); + for (i = 0; i < skel->rodata->nr_cpu_ids; i++) { + int j = (i + stride) % skel->rodata->nr_cpu_ids; + + if (skel->rodata_pair_cpu->pair_cpu[i] >= 0) + continue; + + SCX_BUG_ON(i == j, + "Invalid stride %d - CPU%d wants to be its own pair", + stride, i); + + SCX_BUG_ON(skel->rodata_pair_cpu->pair_cpu[j] >= 0, + "Invalid stride %d - three CPUs (%d, %d, %d) want to be a pair", + stride, i, j, skel->rodata_pair_cpu->pair_cpu[j]); + + skel->rodata_pair_cpu->pair_cpu[i] = j; + skel->rodata_pair_cpu->pair_cpu[j] = i; + skel->rodata_pair_id->pair_id[i] = pair_id; + skel->rodata_pair_id->pair_id[j] = pair_id; + skel->rodata_in_pair_idx->in_pair_idx[i] = 0; + skel->rodata_in_pair_idx->in_pair_idx[j] = 1; + pair_id++; + + printf("[%d, %d] ", i, j); + } + printf("\n"); + + SCX_OPS_LOAD(skel, pair_ops, scx_pair, uei); + + /* + * Populate the cgrp_q_arr map which is an array containing per-cgroup + * queues. It'd probably be better to do this from BPF but there are too + * many to initialize statically and there's no way to dynamically + * populate from BPF. 
+ */ + outer_fd = bpf_map__fd(skel->maps.cgrp_q_arr); + SCX_BUG_ON(outer_fd < 0, "Failed to get outer_fd: %d", outer_fd); + + printf("Initializing"); + for (i = 0; i < MAX_CGRPS; i++) { + __s32 inner_fd; + + if (exit_req) + break; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, + sizeof(__u32), MAX_QUEUED, NULL); + SCX_BUG_ON(inner_fd < 0, "Failed to get inner_fd: %d", + inner_fd); + SCX_BUG_ON(bpf_map_update_elem(outer_fd, &i, &inner_fd, BPF_ANY), + "Failed to set inner map"); + close(inner_fd); + + if (!(i % 10)) + printf("."); + fflush(stdout); + } + printf("\n"); + + /* + * Fully initialized, attach and run. + */ + link = SCX_OPS_ATTACH(skel, pair_ops, scx_pair); + + while (!exit_req && !UEI_EXITED(skel, uei)) { + printf("[SEQ %llu]\n", seq++); + printf(" total:%10" PRIu64 " dispatch:%10" PRIu64 " missing:%10" PRIu64 "\n", + skel->bss->nr_total, + skel->bss->nr_dispatched, + skel->bss->nr_missing); + printf(" kicks:%10" PRIu64 " preemptions:%7" PRIu64 "\n", + skel->bss->nr_kicks, + skel->bss->nr_preemptions); + printf(" exp:%10" PRIu64 " exp_wait:%10" PRIu64 " exp_empty:%10" PRIu64 "\n", + skel->bss->nr_exps, + skel->bss->nr_exp_waits, + skel->bss->nr_exp_empty); + printf("cgnext:%10" PRIu64 " cgcoll:%10" PRIu64 " cgempty:%10" PRIu64 "\n", + skel->bss->nr_cgrp_next, + skel->bss->nr_cgrp_coll, + skel->bss->nr_cgrp_empty); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + ecode = UEI_REPORT(skel, uei); + scx_pair__destroy(skel); + + if (UEI_ECODE_RESTART(ecode)) + goto restart; + return 0; +} diff --git a/tools/sched_ext/scx_pair.h b/tools/sched_ext/scx_pair.h new file mode 100644 index 000000000000..d9666a447d3f --- /dev/null +++ b/tools/sched_ext/scx_pair.h @@ -0,0 +1,9 @@ +#ifndef __SCX_EXAMPLE_PAIR_H +#define __SCX_EXAMPLE_PAIR_H + +enum { + MAX_QUEUED = 4096, + MAX_CGRPS = 4096, +}; + +#endif /* __SCX_EXAMPLE_PAIR_H */ diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index 3a20bb0c014a..b68abb9e760b 
100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -11,8 +11,6 @@ * * - BPF-side queueing using PIDs. * - Sleepable per-task storage allocation using ops.prep_enable(). - * - Using ops.cpu_release() to handle a higher priority scheduling class taking - * the CPU away. * - Core-sched support. * * This scheduler is primarily for demonstration and testing of sched_ext @@ -26,8 +24,11 @@ enum consts { ONE_SEC_IN_NS = 1000000000, + ONE_MSEC_IN_NS = 1000000, + LOWPRI_INTV_NS = 10 * ONE_MSEC_IN_NS, SHARED_DSQ = 0, HIGHPRI_DSQ = 1, + LOWPRI_DSQ = 2, HIGHPRI_WEIGHT = 8668, /* this is what -20 maps to */ }; @@ -39,13 +40,20 @@ const volatile u32 stall_kernel_nth; const volatile u32 dsp_inf_loop_after; const volatile u32 dsp_batch; const volatile bool highpri_boosting; -const volatile bool print_shared_dsq; +const volatile bool print_dsqs_and_events; +const volatile bool print_msgs; +const volatile u64 sub_cgroup_id; const volatile s32 disallow_tgid; const volatile bool suppress_dump; +const volatile bool always_enq_immed; +const volatile u32 immed_stress_nth; u64 nr_highpri_queued; u32 test_error_cnt; +#define MAX_SUB_SCHEDS 8 +u64 sub_sched_cgroup_ids[MAX_SUB_SCHEDS]; + UEI_DEFINE(uei); struct qmap { @@ -56,7 +64,8 @@ struct qmap { queue1 SEC(".maps"), queue2 SEC(".maps"), queue3 SEC(".maps"), - queue4 SEC(".maps"); + queue4 SEC(".maps"), + dump_store SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); @@ -125,7 +134,7 @@ struct { } cpu_ctx_stor SEC(".maps"); /* Statistics */ -u64 nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued, nr_ddsp_from_enq; +u64 nr_enqueued, nr_dispatched, nr_reenqueued, nr_reenqueued_cpu0, nr_dequeued, nr_ddsp_from_enq; u64 nr_core_sched_execed; u64 nr_expedited_local, nr_expedited_remote, nr_expedited_lost, nr_expedited_from_timer; u32 cpuperf_min, cpuperf_avg, cpuperf_max; @@ -135,8 +144,10 @@ static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu) { s32 cpu; - if 
(p->nr_cpus_allowed == 1 || - scx_bpf_test_and_clear_cpu_idle(prev_cpu)) + if (!always_enq_immed && p->nr_cpus_allowed == 1) + return prev_cpu; + + if (scx_bpf_test_and_clear_cpu_idle(prev_cpu)) return prev_cpu; cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); @@ -166,6 +177,9 @@ s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, if (!(tctx = lookup_task_ctx(p))) return -ESRCH; + if (p->scx.weight < 2 && !(p->flags & PF_KTHREAD)) + return prev_cpu; + cpu = pick_direct_dispatch_cpu(p, prev_cpu); if (cpu >= 0) { @@ -200,6 +214,12 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) void *ring; s32 cpu; + if (enq_flags & SCX_ENQ_REENQ) { + __sync_fetch_and_add(&nr_reenqueued, 1); + if (scx_bpf_task_cpu(p) == 0) + __sync_fetch_and_add(&nr_reenqueued_cpu0, 1); + } + if (p->flags & PF_KTHREAD) { if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth)) return; @@ -221,6 +241,22 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) tctx->core_sched_seq = core_sched_tail_seqs[idx]++; /* + * IMMED stress testing: Every immed_stress_nth'th enqueue, dispatch + * directly to prev_cpu's local DSQ even when busy to force dsq->nr > 1 + * and exercise the kernel IMMED reenqueue trigger paths. + */ + if (immed_stress_nth && !(enq_flags & SCX_ENQ_REENQ)) { + static u32 immed_stress_cnt; + + if (!(++immed_stress_cnt % immed_stress_nth)) { + tctx->force_local = false; + scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | scx_bpf_task_cpu(p), + slice_ns, enq_flags); + return; + } + } + + /* * If qmap_select_cpu() is telling us to or this is the last runnable * task on the CPU, enqueue locally. 
*/ @@ -230,8 +266,15 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) return; } + /* see lowpri_timerfn() */ + if (__COMPAT_has_generic_reenq() && + p->scx.weight < 2 && !(p->flags & PF_KTHREAD) && !(enq_flags & SCX_ENQ_REENQ)) { + scx_bpf_dsq_insert(p, LOWPRI_DSQ, slice_ns, enq_flags); + return; + } + /* if select_cpu() wasn't called, try direct dispatch */ - if (!(enq_flags & SCX_ENQ_CPU_SELECTED) && + if (!__COMPAT_is_enq_cpu_selected(enq_flags) && (cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) { __sync_fetch_and_add(&nr_ddsp_from_enq, 1); scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags); @@ -318,12 +361,9 @@ static bool dispatch_highpri(bool from_timer) if (tctx->highpri) { /* exercise the set_*() and vtime interface too */ - __COMPAT_scx_bpf_dsq_move_set_slice( - BPF_FOR_EACH_ITER, slice_ns * 2); - __COMPAT_scx_bpf_dsq_move_set_vtime( - BPF_FOR_EACH_ITER, highpri_seq++); - __COMPAT_scx_bpf_dsq_move_vtime( - BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0); + scx_bpf_dsq_move_set_slice(BPF_FOR_EACH_ITER, slice_ns * 2); + scx_bpf_dsq_move_set_vtime(BPF_FOR_EACH_ITER, highpri_seq++); + scx_bpf_dsq_move_vtime(BPF_FOR_EACH_ITER, p, HIGHPRI_DSQ, 0); } } @@ -340,9 +380,8 @@ static bool dispatch_highpri(bool from_timer) else cpu = scx_bpf_pick_any_cpu(p->cpus_ptr, 0); - if (__COMPAT_scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, - SCX_DSQ_LOCAL_ON | cpu, - SCX_ENQ_PREEMPT)) { + if (scx_bpf_dsq_move(BPF_FOR_EACH_ITER, p, SCX_DSQ_LOCAL_ON | cpu, + SCX_ENQ_PREEMPT)) { if (cpu == this_cpu) { dispatched = true; __sync_fetch_and_add(&nr_expedited_local, 1); @@ -374,7 +413,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) if (dispatch_highpri(false)) return; - if (!nr_highpri_queued && scx_bpf_dsq_move_to_local(SHARED_DSQ)) + if (!nr_highpri_queued && scx_bpf_dsq_move_to_local(SHARED_DSQ, 0)) return; if (dsp_inf_loop_after && nr_dispatched > dsp_inf_loop_after) { @@ -432,6 +471,46 @@ void 
BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) __sync_fetch_and_add(&nr_dispatched, 1); scx_bpf_dsq_insert(p, SHARED_DSQ, slice_ns, 0); + + /* + * scx_qmap uses a global BPF queue that any CPU's + * dispatch can pop from. If this CPU popped a task that + * can't run here, it gets stranded on SHARED_DSQ after + * consume_dispatch_q() skips it. Kick the task's home + * CPU so it drains SHARED_DSQ. + * + * There's a race between the pop and the flush of the + * buffered dsq_insert: + * + * CPU 0 (dispatching) CPU 1 (home, idle) + * ~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~ + * pop from BPF queue + * dsq_insert(buffered) + * balance: + * SHARED_DSQ empty + * BPF queue empty + * -> goes idle + * flush -> on SHARED + * kick CPU 1 + * wakes, drains task + * + * The kick prevents indefinite stalls but a per-CPU + * kthread like ksoftirqd can be briefly stranded when + * its home CPU enters idle with softirq pending, + * triggering: + * + * "NOHZ tick-stop error: local softirq work is pending, handler #N!!!" + * + * from report_idle_softirq(). The kick lands shortly + * after and the home CPU drains the task. This could be + * avoided by e.g. dispatching pinned tasks to local or + * global DSQs, but the current code is left as-is to + * document this class of issue -- other schedulers + * seeing similar warnings can use this as a reference. 
+ */ + if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) + scx_bpf_kick_cpu(scx_bpf_task_cpu(p), 0); + bpf_task_release(p); batch--; @@ -439,7 +518,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) if (!batch || !scx_bpf_dispatch_nr_slots()) { if (dispatch_highpri(false)) return; - scx_bpf_dsq_move_to_local(SHARED_DSQ); + scx_bpf_dsq_move_to_local(SHARED_DSQ, 0); return; } if (!cpuc->dsp_cnt) @@ -449,6 +528,12 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) cpuc->dsp_cnt = 0; } + for (i = 0; i < MAX_SUB_SCHEDS; i++) { + if (sub_sched_cgroup_ids[i] && + scx_bpf_sub_dispatch(sub_sched_cgroup_ids[i])) + return; + } + /* * No other tasks. @prev will keep running. Update its core_sched_seq as * if the task were enqueued and dispatched immediately. @@ -531,21 +616,11 @@ bool BPF_STRUCT_OPS(qmap_core_sched_before, return task_qdist(a) > task_qdist(b); } -void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args) -{ - u32 cnt; - - /* - * Called when @cpu is taken by a higher priority scheduling class. This - * makes @cpu no longer available for executing sched_ext tasks. As we - * don't want the tasks in @cpu's local dsq to sit there until @cpu - * becomes available again, re-enqueue them into the global dsq. See - * %SCX_ENQ_REENQ handling in qmap_enqueue(). - */ - cnt = scx_bpf_reenqueue_local(); - if (cnt) - __sync_fetch_and_add(&nr_reenqueued, cnt); -} +/* + * sched_switch tracepoint and cpu_release handlers are no longer needed. + * With SCX_OPS_ALWAYS_ENQ_IMMED, wakeup_preempt_scx() reenqueues IMMED + * tasks when a higher-priority scheduling class takes the CPU. + */ s32 BPF_STRUCT_OPS(qmap_init_task, struct task_struct *p, struct scx_init_task_args *args) @@ -578,11 +653,26 @@ void BPF_STRUCT_OPS(qmap_dump, struct scx_dump_ctx *dctx) return; scx_bpf_dump("QMAP FIFO[%d]:", i); + + /* + * Dump can be invoked anytime and there is no way to iterate in + * a non-destructive way. 
Pop and store in dump_store and then + * restore afterwards. If racing against new enqueues, ordering + * can get mixed up. + */ bpf_repeat(4096) { if (bpf_map_pop_elem(fifo, &pid)) break; + bpf_map_push_elem(&dump_store, &pid, 0); scx_bpf_dump(" %d", pid); } + + bpf_repeat(4096) { + if (bpf_map_pop_elem(&dump_store, &pid)) + break; + bpf_map_push_elem(fifo, &pid, 0); + } + scx_bpf_dump("\n"); } } @@ -615,6 +705,29 @@ void BPF_STRUCT_OPS(qmap_dump_task, struct scx_dump_ctx *dctx, struct task_struc taskc->force_local, taskc->core_sched_seq); } +s32 BPF_STRUCT_OPS(qmap_cgroup_init, struct cgroup *cgrp, struct scx_cgroup_init_args *args) +{ + if (print_msgs) + bpf_printk("CGRP INIT %llu weight=%u period=%lu quota=%ld burst=%lu", + cgrp->kn->id, args->weight, args->bw_period_us, + args->bw_quota_us, args->bw_burst_us); + return 0; +} + +void BPF_STRUCT_OPS(qmap_cgroup_set_weight, struct cgroup *cgrp, u32 weight) +{ + if (print_msgs) + bpf_printk("CGRP SET %llu weight=%u", cgrp->kn->id, weight); +} + +void BPF_STRUCT_OPS(qmap_cgroup_set_bandwidth, struct cgroup *cgrp, + u64 period_us, u64 quota_us, u64 burst_us) +{ + if (print_msgs) + bpf_printk("CGRP SET %llu period=%lu quota=%ld burst=%lu", + cgrp->kn->id, period_us, quota_us, burst_us); +} + /* * Print out the online and possible CPU map using bpf_printk() as a * demonstration of using the cpumask kfuncs and ops.cpu_on/offline(). 
@@ -656,16 +769,20 @@ static void print_cpus(void) void BPF_STRUCT_OPS(qmap_cpu_online, s32 cpu) { - bpf_printk("CPU %d coming online", cpu); - /* @cpu is already online at this point */ - print_cpus(); + if (print_msgs) { + bpf_printk("CPU %d coming online", cpu); + /* @cpu is already online at this point */ + print_cpus(); + } } void BPF_STRUCT_OPS(qmap_cpu_offline, s32 cpu) { - bpf_printk("CPU %d going offline", cpu); - /* @cpu is still online at this point */ - print_cpus(); + if (print_msgs) { + bpf_printk("CPU %d going offline", cpu); + /* @cpu is still online at this point */ + print_cpus(); + } } struct monitor_timer { @@ -769,37 +886,104 @@ static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer) monitor_cpuperf(); - if (print_shared_dsq) + if (print_dsqs_and_events) { + struct scx_event_stats events; + dump_shared_dsq(); + __COMPAT_scx_bpf_events(&events, sizeof(events)); + + bpf_printk("%35s: %lld", "SCX_EV_SELECT_CPU_FALLBACK", + scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK)); + bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE", + scx_read_event(&events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE)); + bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_KEEP_LAST", + scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST)); + bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING", + scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING)); + bpf_printk("%35s: %lld", "SCX_EV_REFILL_SLICE_DFL", + scx_read_event(&events, SCX_EV_REFILL_SLICE_DFL)); + bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION", + scx_read_event(&events, SCX_EV_BYPASS_DURATION)); + bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH", + scx_read_event(&events, SCX_EV_BYPASS_DISPATCH)); + bpf_printk("%35s: %lld", "SCX_EV_BYPASS_ACTIVATE", + scx_read_event(&events, SCX_EV_BYPASS_ACTIVATE)); + } + bpf_timer_start(timer, ONE_SEC_IN_NS, 0); return 0; } +struct lowpri_timer { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, u32); 
+ __type(value, struct lowpri_timer); +} lowpri_timer SEC(".maps"); + +/* + * Nice 19 tasks are put into the lowpri DSQ. Every 10ms, reenq is triggered and + * the tasks are transferred to SHARED_DSQ. + */ +static int lowpri_timerfn(void *map, int *key, struct bpf_timer *timer) +{ + scx_bpf_dsq_reenq(LOWPRI_DSQ, 0); + bpf_timer_start(timer, LOWPRI_INTV_NS, 0); + return 0; +} + s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init) { u32 key = 0; struct bpf_timer *timer; s32 ret; - print_cpus(); + if (print_msgs && !sub_cgroup_id) + print_cpus(); ret = scx_bpf_create_dsq(SHARED_DSQ, -1); - if (ret) + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", SHARED_DSQ, ret); return ret; + } ret = scx_bpf_create_dsq(HIGHPRI_DSQ, -1); + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", HIGHPRI_DSQ, ret); + return ret; + } + + ret = scx_bpf_create_dsq(LOWPRI_DSQ, -1); if (ret) return ret; timer = bpf_map_lookup_elem(&monitor_timer, &key); if (!timer) return -ESRCH; - bpf_timer_init(timer, &monitor_timer, CLOCK_MONOTONIC); bpf_timer_set_callback(timer, monitor_timerfn); + ret = bpf_timer_start(timer, ONE_SEC_IN_NS, 0); + if (ret) + return ret; - return bpf_timer_start(timer, ONE_SEC_IN_NS, 0); + if (__COMPAT_has_generic_reenq()) { + /* see lowpri_timerfn() */ + timer = bpf_map_lookup_elem(&lowpri_timer, &key); + if (!timer) + return -ESRCH; + bpf_timer_init(timer, &lowpri_timer, CLOCK_MONOTONIC); + bpf_timer_set_callback(timer, lowpri_timerfn); + ret = bpf_timer_start(timer, LOWPRI_INTV_NS, 0); + if (ret) + return ret; + } + + return 0; } void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) @@ -807,6 +991,36 @@ void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) UEI_RECORD(uei, ei); } +s32 BPF_STRUCT_OPS(qmap_sub_attach, struct scx_sub_attach_args *args) +{ + s32 i; + + for (i = 0; i < MAX_SUB_SCHEDS; i++) { + if (!sub_sched_cgroup_ids[i]) { + sub_sched_cgroup_ids[i] = args->ops->sub_cgroup_id; + bpf_printk("attaching sub-sched[%d] on %s", + i, args->cgroup_path); + 
return 0; + } + } + + return -ENOSPC; +} + +void BPF_STRUCT_OPS(qmap_sub_detach, struct scx_sub_detach_args *args) +{ + s32 i; + + for (i = 0; i < MAX_SUB_SCHEDS; i++) { + if (sub_sched_cgroup_ids[i] == args->ops->sub_cgroup_id) { + sub_sched_cgroup_ids[i] = 0; + bpf_printk("detaching sub-sched[%d] on %s", + i, args->cgroup_path); + break; + } + } +} + SCX_OPS_DEFINE(qmap_ops, .select_cpu = (void *)qmap_select_cpu, .enqueue = (void *)qmap_enqueue, @@ -814,11 +1028,15 @@ SCX_OPS_DEFINE(qmap_ops, .dispatch = (void *)qmap_dispatch, .tick = (void *)qmap_tick, .core_sched_before = (void *)qmap_core_sched_before, - .cpu_release = (void *)qmap_cpu_release, .init_task = (void *)qmap_init_task, .dump = (void *)qmap_dump, .dump_cpu = (void *)qmap_dump_cpu, .dump_task = (void *)qmap_dump_task, + .cgroup_init = (void *)qmap_cgroup_init, + .cgroup_set_weight = (void *)qmap_cgroup_set_weight, + .cgroup_set_bandwidth = (void *)qmap_cgroup_set_bandwidth, + .sub_attach = (void *)qmap_sub_attach, + .sub_detach = (void *)qmap_sub_detach, .cpu_online = (void *)qmap_cpu_online, .cpu_offline = (void *)qmap_cpu_offline, .init = (void *)qmap_init, diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c index c4912ab2e76f..e7c89a2bc3d8 100644 --- a/tools/sched_ext/scx_qmap.c +++ b/tools/sched_ext/scx_qmap.c @@ -10,6 +10,7 @@ #include <inttypes.h> #include <signal.h> #include <libgen.h> +#include <sys/stat.h> #include <bpf/bpf.h> #include <scx/common.h> #include "scx_qmap.bpf.skel.h" @@ -20,7 +21,7 @@ const char help_fmt[] = "See the top-level comment in .bpf.c for more details.\n" "\n" "Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n" -" [-P] [-d PID] [-D LEN] [-p] [-v]\n" +" [-P] [-M] [-H] [-d PID] [-D LEN] [-S] [-p] [-I] [-F COUNT] [-v]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" @@ -28,12 +29,15 @@ const char help_fmt[] = " -T COUNT Stall every COUNT'th kernel thread\n" " -l 
COUNT Trigger dispatch infinite looping after COUNT dispatches\n" " -b COUNT Dispatch upto COUNT tasks together\n" -" -P Print out DSQ content to trace_pipe every second, use with -b\n" +" -P Print out DSQ content and event counters to trace_pipe every second\n" +" -M Print out debug messages to trace_pipe\n" " -H Boost nice -20 tasks in SHARED_DSQ, use with -b\n" " -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" " -D LEN Set scx_exit_info.dump buffer length\n" " -S Suppress qmap-specific debug dump\n" " -p Switch only tasks on SCHED_EXT policy instead of all\n" +" -I Turn on SCX_OPS_ALWAYS_ENQ_IMMED\n" +" -F COUNT IMMED stress: force every COUNT'th enqueue to a busy local DSQ (use with -I)\n" " -v Print libbpf debug messages\n" " -h Display this help and exit\n"; @@ -66,7 +70,7 @@ int main(int argc, char **argv) skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); - while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) { + while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PMHc:d:D:SpIF:vh")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; @@ -87,11 +91,24 @@ int main(int argc, char **argv) skel->rodata->dsp_batch = strtoul(optarg, NULL, 0); break; case 'P': - skel->rodata->print_shared_dsq = true; + skel->rodata->print_dsqs_and_events = true; + break; + case 'M': + skel->rodata->print_msgs = true; break; case 'H': skel->rodata->highpri_boosting = true; break; + case 'c': { + struct stat st; + if (stat(optarg, &st) < 0) { + perror("stat"); + return 1; + } + skel->struct_ops.qmap_ops->sub_cgroup_id = st.st_ino; + skel->rodata->sub_cgroup_id = st.st_ino; + break; + } case 'd': skel->rodata->disallow_tgid = strtol(optarg, NULL, 0); if (skel->rodata->disallow_tgid < 0) @@ -106,6 +123,13 @@ int main(int argc, char **argv) case 'p': skel->struct_ops.qmap_ops->flags |= SCX_OPS_SWITCH_PARTIAL; break; + case 'I': + skel->rodata->always_enq_immed = true; + 
skel->struct_ops.qmap_ops->flags |= SCX_OPS_ALWAYS_ENQ_IMMED; + break; + case 'F': + skel->rodata->immed_stress_nth = strtoul(optarg, NULL, 0); + break; case 'v': verbose = true; break; @@ -122,9 +146,10 @@ int main(int argc, char **argv) long nr_enqueued = skel->bss->nr_enqueued; long nr_dispatched = skel->bss->nr_dispatched; - printf("stats : enq=%lu dsp=%lu delta=%ld reenq=%"PRIu64" deq=%"PRIu64" core=%"PRIu64" enq_ddsp=%"PRIu64"\n", + printf("stats : enq=%lu dsp=%lu delta=%ld reenq/cpu0=%"PRIu64"/%"PRIu64" deq=%"PRIu64" core=%"PRIu64" enq_ddsp=%"PRIu64"\n", nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, - skel->bss->nr_reenqueued, skel->bss->nr_dequeued, + skel->bss->nr_reenqueued, skel->bss->nr_reenqueued_cpu0, + skel->bss->nr_dequeued, skel->bss->nr_core_sched_execed, skel->bss->nr_ddsp_from_enq); printf(" exp_local=%"PRIu64" exp_remote=%"PRIu64" exp_timer=%"PRIu64" exp_lost=%"PRIu64"\n", diff --git a/tools/sched_ext/scx_sdt.bpf.c b/tools/sched_ext/scx_sdt.bpf.c new file mode 100644 index 000000000000..a1e33e6c412b --- /dev/null +++ b/tools/sched_ext/scx_sdt.bpf.c @@ -0,0 +1,717 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Arena-based task data scheduler. This is a variation of scx_simple + * that uses a combined allocator and indexing structure to organize + * task data. Task context allocation is done when a task enters the + * scheduler, while freeing is done when it exits. Task contexts are + * retrieved from task-local storage, pointing to the allocated memory. + * + * The main purpose of this scheduler is to demostrate arena memory + * management. + * + * Copyright (c) 2024-2025 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2024-2025 Emil Tsalapatis <etsal@meta.com> + * Copyright (c) 2024-2025 Tejun Heo <tj@kernel.org> + * + */ +#include <scx/common.bpf.h> +#include <scx/bpf_arena_common.bpf.h> + +#include "scx_sdt.h" + +char _license[] SEC("license") = "GPL"; + +UEI_DEFINE(uei); + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); +#if defined(__TARGET_ARCH_arm64) || defined(__aarch64__) + __uint(max_entries, 1 << 16); /* number of pages */ + __ulong(map_extra, (1ull << 32)); /* start of mmap() region */ +#else + __uint(max_entries, 1 << 20); /* number of pages */ + __ulong(map_extra, (1ull << 44)); /* start of mmap() region */ +#endif +} arena __weak SEC(".maps"); + +#define SHARED_DSQ 0 + +#define DEFINE_SDT_STAT(metric) \ +static inline void \ +stat_inc_##metric(struct scx_stats __arena *stats) \ +{ \ + cast_kern(stats); \ + stats->metric += 1; \ +} \ +__u64 stat_##metric; \ + +DEFINE_SDT_STAT(enqueue); +DEFINE_SDT_STAT(init); +DEFINE_SDT_STAT(exit); +DEFINE_SDT_STAT(select_idle_cpu); +DEFINE_SDT_STAT(select_busy_cpu); + +/* + * Necessary for cond_break/can_loop's semantics. According to kernel commit + * 011832b, the loop counter variable must be seen as imprecise and bounded + * by the verifier. Initializing it from a constant (e.g., i = 0;), then, + * makes it precise and prevents may_goto from helping with converging the + * loop. For these loops we must initialize the loop counter from a variable + * whose value the verifier cannot reason about when checking the program, so + * that the loop counter's value is imprecise. + */ +static __u64 zero = 0; + +/* + * XXX Hack to get the verifier to find the arena for sdt_exit_task. + * As of 6.12-rc5, The verifier associates arenas with programs by + * checking LD.IMM instruction operands for an arena and populating + * the program state with the first instance it finds. 
This requires + * accessing our global arena variable, but scx methods do not necessarily + * do so while still using pointers from that arena. Insert a bpf_printk + * statement that triggers at most once to generate an LD.IMM instruction + * to access the arena and help the verifier. + */ +static volatile bool scx_arena_verify_once; + +__hidden void scx_arena_subprog_init(void) +{ + if (scx_arena_verify_once) + return; + + bpf_printk("%s: arena pointer %p", __func__, &arena); + scx_arena_verify_once = true; +} + + +private(LOCK) struct bpf_spin_lock alloc_lock; +private(POOL_LOCK) struct bpf_spin_lock alloc_pool_lock; + +/* allocation pools */ +struct sdt_pool desc_pool; +struct sdt_pool chunk_pool; + +/* Protected by alloc_lock. */ +struct scx_alloc_stats alloc_stats; + + +/* Allocate element from the pool. Must be called with a then pool lock held. */ +static +void __arena *scx_alloc_from_pool(struct sdt_pool *pool) +{ + __u64 elem_size, max_elems; + void __arena *slab; + void __arena *ptr; + + elem_size = pool->elem_size; + max_elems = pool->max_elems; + + /* If the chunk is spent, get a new one. */ + if (pool->idx >= max_elems) { + slab = bpf_arena_alloc_pages(&arena, NULL, + div_round_up(max_elems * elem_size, PAGE_SIZE), NUMA_NO_NODE, 0); + if (!slab) + return NULL; + + pool->slab = slab; + pool->idx = 0; + } + + ptr = (void __arena *)((__u64) pool->slab + elem_size * pool->idx); + pool->idx += 1; + + return ptr; +} + +/* Alloc desc and associated chunk. Called with the allocator spinlock held. */ +static sdt_desc_t *scx_alloc_chunk(void) +{ + struct sdt_chunk __arena *chunk; + sdt_desc_t *desc; + sdt_desc_t *out; + + chunk = scx_alloc_from_pool(&chunk_pool); + if (!chunk) + return NULL; + + desc = scx_alloc_from_pool(&desc_pool); + if (!desc) { + /* + * Effectively frees the previous chunk allocation. + * Index cannot be 0, so decrementing is always + * valid. 
+ */ + chunk_pool.idx -= 1; + return NULL; + } + + out = desc; + + desc->nr_free = SDT_TASK_ENTS_PER_CHUNK; + desc->chunk = chunk; + + alloc_stats.chunk_allocs += 1; + + return out; +} + +static int pool_set_size(struct sdt_pool *pool, __u64 data_size, __u64 nr_pages) +{ + if (unlikely(data_size % 8)) + return -EINVAL; + + if (unlikely(nr_pages == 0)) + return -EINVAL; + + pool->elem_size = data_size; + pool->max_elems = (PAGE_SIZE * nr_pages) / pool->elem_size; + /* Populate the pool slab on the first allocation. */ + pool->idx = pool->max_elems; + + return 0; +} + +/* Initialize both the base pool allocators and the root chunk of the index. */ +__hidden int +scx_alloc_init(struct scx_allocator *alloc, __u64 data_size) +{ + size_t min_chunk_size; + int ret; + + _Static_assert(sizeof(struct sdt_chunk) <= PAGE_SIZE, + "chunk size must fit into a page"); + + ret = pool_set_size(&chunk_pool, sizeof(struct sdt_chunk), 1); + if (ret != 0) + return ret; + + ret = pool_set_size(&desc_pool, sizeof(struct sdt_desc), 1); + if (ret != 0) + return ret; + + /* Wrap data into a descriptor and word align. */ + data_size += sizeof(struct sdt_data); + data_size = round_up(data_size, 8); + + /* + * Ensure we allocate large enough chunks from the arena to avoid excessive + * internal fragmentation when turning chunks it into structs. 
+ */ + min_chunk_size = div_round_up(SDT_TASK_MIN_ELEM_PER_ALLOC * data_size, PAGE_SIZE); + ret = pool_set_size(&alloc->pool, data_size, min_chunk_size); + if (ret != 0) + return ret; + + bpf_spin_lock(&alloc_lock); + alloc->root = scx_alloc_chunk(); + bpf_spin_unlock(&alloc_lock); + if (!alloc->root) + return -ENOMEM; + + return 0; +} + +static +int set_idx_state(sdt_desc_t *desc, __u64 pos, bool state) +{ + __u64 __arena *allocated = desc->allocated; + __u64 bit; + + if (unlikely(pos >= SDT_TASK_ENTS_PER_CHUNK)) + return -EINVAL; + + bit = (__u64)1 << (pos % 64); + + if (state) + allocated[pos / 64] |= bit; + else + allocated[pos / 64] &= ~bit; + + return 0; +} + +static __noinline +int mark_nodes_avail(sdt_desc_t *lv_desc[SDT_TASK_LEVELS], __u64 lv_pos[SDT_TASK_LEVELS]) +{ + sdt_desc_t *desc; + __u64 u, level; + int ret; + + for (u = zero; u < SDT_TASK_LEVELS && can_loop; u++) { + level = SDT_TASK_LEVELS - 1 - u; + + /* Only propagate upwards if we are the parent's only free chunk. */ + desc = lv_desc[level]; + + ret = set_idx_state(desc, lv_pos[level], false); + if (unlikely(ret != 0)) + return ret; + + desc->nr_free += 1; + if (desc->nr_free > 1) + return 0; + } + + return 0; +} + +/* + * Free the allocated struct with the given index. Called with the + * allocator lock taken. + */ +__hidden +int scx_alloc_free_idx(struct scx_allocator *alloc, __u64 idx) +{ + const __u64 mask = (1 << SDT_TASK_ENTS_PER_PAGE_SHIFT) - 1; + sdt_desc_t *lv_desc[SDT_TASK_LEVELS]; + sdt_desc_t * __arena *desc_children; + struct sdt_chunk __arena *chunk; + sdt_desc_t *desc; + struct sdt_data __arena *data; + __u64 level, shift, pos; + __u64 lv_pos[SDT_TASK_LEVELS]; + int ret; + int i; + + if (!alloc) + return 0; + + desc = alloc->root; + if (unlikely(!desc)) + return -EINVAL; + + /* To appease the verifier. */ + for (level = zero; level < SDT_TASK_LEVELS && can_loop; level++) { + lv_desc[level] = NULL; + lv_pos[level] = 0; + } + + /* Find the leaf node containing the index. 
*/ + for (level = zero; level < SDT_TASK_LEVELS && can_loop; level++) { + shift = (SDT_TASK_LEVELS - 1 - level) * SDT_TASK_ENTS_PER_PAGE_SHIFT; + pos = (idx >> shift) & mask; + + lv_desc[level] = desc; + lv_pos[level] = pos; + + if (level == SDT_TASK_LEVELS - 1) + break; + + chunk = desc->chunk; + + desc_children = (sdt_desc_t * __arena *)chunk->descs; + desc = desc_children[pos]; + + if (unlikely(!desc)) + return -EINVAL; + } + + chunk = desc->chunk; + + pos = idx & mask; + data = chunk->data[pos]; + if (likely(data)) { + *data = (struct sdt_data) { + .tid.genn = data->tid.genn + 1, + }; + + /* Zero out one word at a time. */ + for (i = zero; i < (alloc->pool.elem_size - sizeof(struct sdt_data)) / 8 + && can_loop; i++) { + data->payload[i] = 0; + } + } + + ret = mark_nodes_avail(lv_desc, lv_pos); + if (unlikely(ret != 0)) + return ret; + + alloc_stats.active_allocs -= 1; + alloc_stats.free_ops += 1; + + return 0; +} + +static inline +int ffs(__u64 word) +{ + unsigned int num = 0; + + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } + + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + + if ((word & 0x1) == 0) { + num += 1; + word >>= 1; + } + + return num; +} + + +/* find the first empty slot */ +__hidden +__u64 chunk_find_empty(sdt_desc_t __arg_arena *desc) +{ + __u64 freeslots; + __u64 i; + + for (i = 0; i < SDT_TASK_CHUNK_BITMAP_U64S; i++) { + freeslots = ~desc->allocated[i]; + if (freeslots == (__u64)0) + continue; + + return (i * 64) + ffs(freeslots); + } + + return SDT_TASK_ENTS_PER_CHUNK; +} + +/* + * Find and return an available idx on the allocator. + * Called with the task spinlock held. 
+ */ +static sdt_desc_t * desc_find_empty(sdt_desc_t *desc, __u64 *idxp) +{ + sdt_desc_t *lv_desc[SDT_TASK_LEVELS]; + sdt_desc_t * __arena *desc_children; + struct sdt_chunk __arena *chunk; + sdt_desc_t *tmp; + __u64 lv_pos[SDT_TASK_LEVELS]; + __u64 u, pos, level; + __u64 idx = 0; + int ret; + + for (level = zero; level < SDT_TASK_LEVELS && can_loop; level++) { + pos = chunk_find_empty(desc); + + /* If we error out, something has gone very wrong. */ + if (unlikely(pos > SDT_TASK_ENTS_PER_CHUNK)) + return NULL; + + if (pos == SDT_TASK_ENTS_PER_CHUNK) + return NULL; + + idx <<= SDT_TASK_ENTS_PER_PAGE_SHIFT; + idx |= pos; + + /* Log the levels to complete allocation. */ + lv_desc[level] = desc; + lv_pos[level] = pos; + + /* The rest of the loop is for internal node traversal. */ + if (level == SDT_TASK_LEVELS - 1) + break; + + /* Allocate an internal node if necessary. */ + chunk = desc->chunk; + desc_children = (sdt_desc_t * __arena *)chunk->descs; + + desc = desc_children[pos]; + if (!desc) { + desc = scx_alloc_chunk(); + if (!desc) + return NULL; + + desc_children[pos] = desc; + } + } + + /* + * Finding the descriptor along with any internal node + * allocations was successful. Update all levels with + * the new allocation. + */ + bpf_for(u, 0, SDT_TASK_LEVELS) { + level = SDT_TASK_LEVELS - 1 - u; + tmp = lv_desc[level]; + + ret = set_idx_state(tmp, lv_pos[level], true); + if (ret != 0) + break; + + tmp->nr_free -= 1; + if (tmp->nr_free > 0) + break; + } + + *idxp = idx; + + return desc; +} + +__hidden +void __arena *scx_alloc(struct scx_allocator *alloc) +{ + struct sdt_data __arena *data = NULL; + struct sdt_chunk __arena *chunk; + sdt_desc_t *desc; + __u64 idx, pos; + + if (!alloc) + return NULL; + + bpf_spin_lock(&alloc_lock); + + /* We unlock if we encounter an error in the function. 
*/ + desc = desc_find_empty(alloc->root, &idx); + if (unlikely(desc == NULL)) { + bpf_spin_unlock(&alloc_lock); + return NULL; + } + + chunk = desc->chunk; + + /* Populate the leaf node if necessary. */ + pos = idx & (SDT_TASK_ENTS_PER_CHUNK - 1); + data = chunk->data[pos]; + if (!data) { + data = scx_alloc_from_pool(&alloc->pool); + if (!data) { + scx_alloc_free_idx(alloc, idx); + bpf_spin_unlock(&alloc_lock); + return NULL; + } + } + + chunk->data[pos] = data; + + /* The data counts as a chunk */ + alloc_stats.data_allocs += 1; + alloc_stats.alloc_ops += 1; + alloc_stats.active_allocs += 1; + + data->tid.idx = idx; + + bpf_spin_unlock(&alloc_lock); + + return data; +} + +/* + * Task BPF map entry recording the task's assigned ID and pointing to the data + * area allocated in arena. + */ +struct scx_task_map_val { + union sdt_id tid; + __u64 tptr; + struct sdt_data __arena *data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct scx_task_map_val); +} scx_task_map SEC(".maps"); + +static struct scx_allocator scx_task_allocator; + +__hidden +void __arena *scx_task_alloc(struct task_struct *p) +{ + struct sdt_data __arena *data = NULL; + struct scx_task_map_val *mval; + + mval = bpf_task_storage_get(&scx_task_map, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!mval) + return NULL; + + data = scx_alloc(&scx_task_allocator); + if (unlikely(!data)) + return NULL; + + mval->tid = data->tid; + mval->tptr = (__u64) p; + mval->data = data; + + return (void __arena *)data->payload; +} + +__hidden +int scx_task_init(__u64 data_size) +{ + return scx_alloc_init(&scx_task_allocator, data_size); +} + +__hidden +void __arena *scx_task_data(struct task_struct *p) +{ + struct sdt_data __arena *data; + struct scx_task_map_val *mval; + + scx_arena_subprog_init(); + + mval = bpf_task_storage_get(&scx_task_map, p, 0, 0); + if (!mval) + return NULL; + + data = mval->data; + + return (void __arena 
*)data->payload; +} + +__hidden +void scx_task_free(struct task_struct *p) +{ + struct scx_task_map_val *mval; + + scx_arena_subprog_init(); + + mval = bpf_task_storage_get(&scx_task_map, p, 0, 0); + if (!mval) + return; + + bpf_spin_lock(&alloc_lock); + scx_alloc_free_idx(&scx_task_allocator, mval->tid.idx); + bpf_spin_unlock(&alloc_lock); + + bpf_task_storage_delete(&scx_task_map, p); +} + +static inline void +scx_stat_global_update(struct scx_stats __arena *stats) +{ + cast_kern(stats); + __sync_fetch_and_add(&stat_enqueue, stats->enqueue); + __sync_fetch_and_add(&stat_init, stats->init); + __sync_fetch_and_add(&stat_exit, stats->exit); + __sync_fetch_and_add(&stat_select_idle_cpu, stats->select_idle_cpu); + __sync_fetch_and_add(&stat_select_busy_cpu, stats->select_busy_cpu); +} + +s32 BPF_STRUCT_OPS(sdt_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) +{ + struct scx_stats __arena *stats; + bool is_idle = false; + s32 cpu; + + stats = scx_task_data(p); + if (!stats) { + scx_bpf_error("%s: no stats for pid %d", __func__, p->pid); + return 0; + } + + cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle); + if (is_idle) { + stat_inc_select_idle_cpu(stats); + scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0); + } else { + stat_inc_select_busy_cpu(stats); + } + + return cpu; +} + +void BPF_STRUCT_OPS(sdt_enqueue, struct task_struct *p, u64 enq_flags) +{ + struct scx_stats __arena *stats; + + stats = scx_task_data(p); + if (!stats) { + scx_bpf_error("%s: no stats for pid %d", __func__, p->pid); + return; + } + + stat_inc_enqueue(stats); + + scx_bpf_dsq_insert(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags); +} + +void BPF_STRUCT_OPS(sdt_dispatch, s32 cpu, struct task_struct *prev) +{ + scx_bpf_dsq_move_to_local(SHARED_DSQ, 0); +} + +s32 BPF_STRUCT_OPS_SLEEPABLE(sdt_init_task, struct task_struct *p, + struct scx_init_task_args *args) +{ + struct scx_stats __arena *stats; + + stats = scx_task_alloc(p); + if (!stats) { + scx_bpf_error("arena 
allocator out of memory"); + return -ENOMEM; + } + + stats->pid = p->pid; + + stat_inc_init(stats); + + return 0; +} + +void BPF_STRUCT_OPS(sdt_exit_task, struct task_struct *p, + struct scx_exit_task_args *args) +{ + struct scx_stats __arena *stats; + + stats = scx_task_data(p); + if (!stats) { + scx_bpf_error("%s: no stats for pid %d", __func__, p->pid); + return; + } + + stat_inc_exit(stats); + scx_stat_global_update(stats); + + scx_task_free(p); +} + +s32 BPF_STRUCT_OPS_SLEEPABLE(sdt_init) +{ + int ret; + + ret = scx_task_init(sizeof(struct scx_stats)); + if (ret < 0) { + scx_bpf_error("%s: failed with %d", __func__, ret); + return ret; + } + + ret = scx_bpf_create_dsq(SHARED_DSQ, -1); + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", SHARED_DSQ, ret); + return ret; + } + + return 0; +} + +void BPF_STRUCT_OPS(sdt_exit, struct scx_exit_info *ei) +{ + UEI_RECORD(uei, ei); +} + +SCX_OPS_DEFINE(sdt_ops, + .select_cpu = (void *)sdt_select_cpu, + .enqueue = (void *)sdt_enqueue, + .dispatch = (void *)sdt_dispatch, + .init_task = (void *)sdt_init_task, + .exit_task = (void *)sdt_exit_task, + .init = (void *)sdt_init, + .exit = (void *)sdt_exit, + .name = "sdt"); diff --git a/tools/sched_ext/scx_sdt.c b/tools/sched_ext/scx_sdt.c new file mode 100644 index 000000000000..bf664b2d3785 --- /dev/null +++ b/tools/sched_ext/scx_sdt.c @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2024 Emil Tsalapatis <etsal@meta.com> + * Copyright (c) 2024 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#include <stdio.h> +#include <unistd.h> +#include <signal.h> +#include <libgen.h> +#include <bpf/bpf.h> +#include <scx/common.h> + +#include "scx_sdt.h" +#include "scx_sdt.bpf.skel.h" + +const char help_fmt[] = +"A simple arena-based sched_ext scheduler.\n" +"\n" +"Modified version of scx_simple that demonstrates arena-based data structures.\n" +"\n" +"Usage: %s [-v]\n" +"\n" +" -v Print libbpf debug messages\n" +" -h Display this help and exit\n"; + +static bool verbose; +static volatile int exit_req; + +static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static void sigint_handler(int sig) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_sdt *skel; + struct bpf_link *link; + __u32 opt; + __u64 ecode; + + libbpf_set_print(libbpf_print_fn); + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); +restart: + optind = 1; + skel = SCX_OPS_OPEN(sdt_ops, scx_sdt); + + while ((opt = getopt(argc, argv, "vh")) != -1) { + switch (opt) { + case 'v': + verbose = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + SCX_OPS_LOAD(skel, sdt_ops, scx_sdt, uei); + link = SCX_OPS_ATTACH(skel, sdt_ops, scx_sdt); + + while (!exit_req && !UEI_EXITED(skel, uei)) { + printf("====SCHEDULING STATS====\n"); + printf("enqueues=%llu\t", skel->bss->stat_enqueue); + printf("inits=%llu\t", skel->bss->stat_init); + printf("exits=%llu\t", skel->bss->stat_exit); + printf("\n"); + + printf("select_idle_cpu=%llu\t", skel->bss->stat_select_idle_cpu); + printf("select_busy_cpu=%llu\t", skel->bss->stat_select_busy_cpu); + printf("\n"); + + printf("====ALLOCATION STATS====\n"); + printf("chunk allocs=%llu\t", 
skel->bss->alloc_stats.chunk_allocs); + printf("data_allocs=%llu\n", skel->bss->alloc_stats.data_allocs); + printf("alloc_ops=%llu\t", skel->bss->alloc_stats.alloc_ops); + printf("free_ops=%llu\t", skel->bss->alloc_stats.free_ops); + printf("active_allocs=%llu\t", skel->bss->alloc_stats.active_allocs); + printf("arena_pages_used=%llu\t", skel->bss->alloc_stats.arena_pages_used); + printf("\n\n"); + + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + ecode = UEI_REPORT(skel, uei); + scx_sdt__destroy(skel); + + if (UEI_ECODE_RESTART(ecode)) + goto restart; + return 0; +} diff --git a/tools/sched_ext/scx_sdt.h b/tools/sched_ext/scx_sdt.h new file mode 100644 index 000000000000..67982ce9bc9b --- /dev/null +++ b/tools/sched_ext/scx_sdt.h @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2025 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2025 Tejun Heo <tj@kernel.org> + * Copyright (c) 2025 Emil Tsalapatis <etsal@meta.com> + */ +#pragma once + +#ifndef __BPF__ +#define __arena +#endif /* __BPF__ */ + +struct scx_alloc_stats { + __u64 chunk_allocs; + __u64 data_allocs; + __u64 alloc_ops; + __u64 free_ops; + __u64 active_allocs; + __u64 arena_pages_used; +}; + +struct sdt_pool { + void __arena *slab; + __u64 elem_size; + __u64 max_elems; + __u64 idx; +}; + +#ifndef div_round_up +#define div_round_up(a, b) (((a) + (b) - 1) / (b)) +#endif + +#ifndef round_up +#define round_up(a, b) (div_round_up((a), (b)) * (b)) +#endif + +typedef struct sdt_desc __arena sdt_desc_t; + +enum sdt_consts { + SDT_TASK_ENTS_PER_PAGE_SHIFT = 9, + SDT_TASK_LEVELS = 3, + SDT_TASK_ENTS_PER_CHUNK = 1 << SDT_TASK_ENTS_PER_PAGE_SHIFT, + SDT_TASK_CHUNK_BITMAP_U64S = div_round_up(SDT_TASK_ENTS_PER_CHUNK, 64), + SDT_TASK_MIN_ELEM_PER_ALLOC = 8, +}; + +union sdt_id { + __s64 val; + struct { + __s32 idx; /* index in the radix tree */ + __s32 genn; /* ++'d on recycle so that it forms unique'ish 64bit ID */ + }; +}; + +struct sdt_chunk; + +/* + * Each index page 
is described by the following descriptor which carries the + * bitmap. This way the actual index can host power-of-two numbers of entries + * which makes indexing cheaper. + */ +struct sdt_desc { + __u64 allocated[SDT_TASK_CHUNK_BITMAP_U64S]; + __u64 nr_free; + struct sdt_chunk __arena *chunk; +}; + +/* + * Leaf node containing per-task data. + */ +struct sdt_data { + union sdt_id tid; + __u64 payload[]; +}; + +/* + * Intermediate node pointing to another intermediate node or leaf node. + */ +struct sdt_chunk { + union { + sdt_desc_t * descs[SDT_TASK_ENTS_PER_CHUNK]; + struct sdt_data __arena *data[SDT_TASK_ENTS_PER_CHUNK]; + }; +}; + +struct scx_allocator { + struct sdt_pool pool; + sdt_desc_t *root; +}; + +struct scx_stats { + int seq; + pid_t pid; + __u64 enqueue; + __u64 exit; + __u64 init; + __u64 select_busy_cpu; + __u64 select_idle_cpu; +}; + +#ifdef __BPF__ + +void __arena *scx_task_data(struct task_struct *p); +int scx_task_init(__u64 data_size); +void __arena *scx_task_alloc(struct task_struct *p); +void scx_task_free(struct task_struct *p); +void scx_arena_subprog_init(void); + +int scx_alloc_init(struct scx_allocator *alloc, __u64 data_size); +u64 scx_alloc_internal(struct scx_allocator *alloc); +int scx_alloc_free_idx(struct scx_allocator *alloc, __u64 idx); + +#endif /* __BPF__ */ diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py index b800d4f5f2e9..02e43c184d43 100644 --- a/tools/sched_ext/scx_show_state.py +++ b/tools/sched_ext/scx_show_state.py @@ -24,19 +24,21 @@ def read_atomic(name): def read_static_key(name): return prog[name].key.enabled.counter.value_() -def ops_state_str(state): - return prog['scx_ops_enable_state_str'][state].string_().decode() +def state_str(state): + return prog['scx_enable_state_str'][state].string_().decode() -ops = prog['scx_ops'] -enable_state = read_atomic("scx_ops_enable_state_var") +root = prog['scx_root'] +enable_state = read_atomic("scx_enable_state_var") -print(f'ops : 
{ops.name.string_().decode()}') -print(f'enabled : {read_static_key("__scx_ops_enabled")}') +if root: + print(f'ops : {root.ops.name.string_().decode()}') +else: + print('ops : ') +print(f'enabled : {read_static_key("__scx_enabled")}') print(f'switching_all : {read_int("scx_switching_all")}') print(f'switched_all : {read_static_key("__scx_switched_all")}') -print(f'enable_state : {ops_state_str(enable_state)} ({enable_state})') -print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}') -print(f'breather_depth: {read_atomic("scx_ops_breather_depth")}') -print(f'bypass_depth : {prog["scx_ops_bypass_depth"].value_()}') +print(f'enable_state : {state_str(enable_state)} ({enable_state})') +print(f'aborting : {prog["scx_aborting"].value_()}') +print(f'bypass_depth : {prog["scx_bypass_depth"].value_()}') print(f'nr_rejected : {read_atomic("scx_nr_rejected")}') print(f'enable_seq : {read_atomic("scx_enable_seq")}') diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c index e6de99dba7db..cc40552b2b5f 100644 --- a/tools/sched_ext/scx_simple.bpf.c +++ b/tools/sched_ext/scx_simple.bpf.c @@ -89,7 +89,7 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev) { - scx_bpf_dsq_move_to_local(SHARED_DSQ); + scx_bpf_dsq_move_to_local(SHARED_DSQ, 0); } void BPF_STRUCT_OPS(simple_running, struct task_struct *p) @@ -121,17 +121,27 @@ void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable) * too much, determine the execution time by taking explicit timestamps * instead of depending on @p->scx.slice. 
*/ - p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight; + u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice); + + scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta); } void BPF_STRUCT_OPS(simple_enable, struct task_struct *p) { - p->scx.dsq_vtime = vtime_now; + scx_bpf_task_set_dsq_vtime(p, vtime_now); } s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init) { - return scx_bpf_create_dsq(SHARED_DSQ, -1); + int ret; + + ret = scx_bpf_create_dsq(SHARED_DSQ, -1); + if (ret) { + scx_bpf_error("failed to create DSQ %d (%d)", SHARED_DSQ, ret); + return ret; + } + + return 0; } void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei) diff --git a/tools/sched_ext/scx_simple.c b/tools/sched_ext/scx_simple.c index 76d83199545c..c3b48611712b 100644 --- a/tools/sched_ext/scx_simple.c +++ b/tools/sched_ext/scx_simple.c @@ -7,6 +7,7 @@ #include <stdio.h> #include <unistd.h> #include <signal.h> +#include <assert.h> #include <libgen.h> #include <bpf/bpf.h> #include <scx/common.h> @@ -41,6 +42,7 @@ static void sigint_handler(int simple) static void read_stats(struct scx_simple *skel, __u64 *stats) { int nr_cpus = libbpf_num_possible_cpus(); + assert(nr_cpus > 0); __u64 cnts[2][nr_cpus]; __u32 idx; @@ -69,6 +71,7 @@ int main(int argc, char **argv) signal(SIGINT, sigint_handler); signal(SIGTERM, sigint_handler); restart: + optind = 1; skel = SCX_OPS_OPEN(simple_ops, scx_simple); while ((opt = getopt(argc, argv, "fvh")) != -1) { diff --git a/tools/sched_ext/scx_userland.bpf.c b/tools/sched_ext/scx_userland.bpf.c new file mode 100644 index 000000000000..f29862b89386 --- /dev/null +++ b/tools/sched_ext/scx_userland.bpf.c @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A minimal userland scheduler. + * + * In terms of scheduling, this provides two different types of behaviors: + * 1. A global FIFO scheduling order for _any_ tasks that have CPU affinity. 
+ * All such tasks are direct-dispatched from the kernel, and are never + * enqueued in user space. + * 2. A primitive vruntime scheduler that is implemented in user space, for all + * other tasks. + * + * Some parts of this example user space scheduler could be implemented more + * efficiently using more complex and sophisticated data structures. For + * example, rather than using BPF_MAP_TYPE_QUEUE's, + * BPF_MAP_TYPE_{USER_}RINGBUF's could be used for exchanging messages between + * user space and kernel space. Similarly, we use a simple vruntime-sorted list + * in user space, but an rbtree could be used instead. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#include <scx/common.bpf.h> +#include "scx_userland.h" + +/* + * Maximum amount of tasks enqueued/dispatched between kernel and user-space. + */ +#define MAX_ENQUEUED_TASKS 4096 + +char _license[] SEC("license") = "GPL"; + +const volatile s32 usersched_pid; + +/* !0 for veristat, set during init */ +const volatile u32 num_possible_cpus = 64; + +/* Stats that are printed by user space. */ +u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues; + +/* + * Number of tasks that are queued for scheduling. + * + * This number is incremented by the BPF component when a task is queued to the + * user-space scheduler and it must be decremented by the user-space scheduler + * when a task is consumed. + */ +volatile u64 nr_queued; + +/* + * Number of tasks that are waiting for scheduling. + * + * This number must be updated by the user-space scheduler to keep track if + * there is still some scheduling work to do. + */ +volatile u64 nr_scheduled; + +UEI_DEFINE(uei); + +/* + * The map containing tasks that are enqueued in user space from the kernel. + * + * This map is drained by the user space scheduler. 
+ */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_ENQUEUED_TASKS); + __type(value, struct scx_userland_enqueued_task); +} enqueued SEC(".maps"); + +/* + * The map containing tasks that are dispatched to the kernel from user space. + * + * Drained by the kernel in userland_dispatch(). + */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_ENQUEUED_TASKS); + __type(value, s32); +} dispatched SEC(".maps"); + +/* Per-task scheduling context */ +struct task_ctx { + bool force_local; /* Dispatch directly to local DSQ */ +}; + +/* Map that contains task-local storage. */ +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct task_ctx); +} task_ctx_stor SEC(".maps"); + +/* + * Flag used to wake-up the user-space scheduler. + */ +static volatile u32 usersched_needed; + +/* + * Set user-space scheduler wake-up flag (equivalent to an atomic release + * operation). + */ +static void set_usersched_needed(void) +{ + __sync_fetch_and_or(&usersched_needed, 1); +} + +/* + * Check and clear user-space scheduler wake-up flag (equivalent to an atomic + * acquire operation). + */ +static bool test_and_clear_usersched_needed(void) +{ + return __sync_fetch_and_and(&usersched_needed, 0) == 1; +} + +static bool is_usersched_task(const struct task_struct *p) +{ + return p->pid == usersched_pid; +} + +static bool keep_in_kernel(const struct task_struct *p) +{ + return p->nr_cpus_allowed < num_possible_cpus; +} + +static struct task_struct *usersched_task(void) +{ + struct task_struct *p; + + p = bpf_task_from_pid(usersched_pid); + /* + * Should never happen -- the usersched task should always be managed + * by sched_ext. 
+ */ + if (!p) + scx_bpf_error("Failed to find usersched task %d", usersched_pid); + + return p; +} + +s32 BPF_STRUCT_OPS(userland_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + if (keep_in_kernel(p)) { + s32 cpu; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to look up task-local storage for %s", p->comm); + return -ESRCH; + } + + if (p->nr_cpus_allowed == 1 || + scx_bpf_test_and_clear_cpu_idle(prev_cpu)) { + tctx->force_local = true; + return prev_cpu; + } + + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0); + if (cpu >= 0) { + tctx->force_local = true; + return cpu; + } + } + + return prev_cpu; +} + +static void dispatch_user_scheduler(void) +{ + struct task_struct *p; + + p = usersched_task(); + if (p) { + scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } +} + +static void enqueue_task_in_user_space(struct task_struct *p, u64 enq_flags) +{ + struct scx_userland_enqueued_task task = {}; + + task.pid = p->pid; + task.sum_exec_runtime = p->se.sum_exec_runtime; + task.weight = p->scx.weight; + + if (bpf_map_push_elem(&enqueued, &task, 0)) { + /* + * If we fail to enqueue the task in user space, put it + * directly on the global DSQ. 
+ */ + __sync_fetch_and_add(&nr_failed_enqueues, 1); + scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } else { + __sync_fetch_and_add(&nr_user_enqueues, 1); + set_usersched_needed(); + } +} + +void BPF_STRUCT_OPS(userland_enqueue, struct task_struct *p, u64 enq_flags) +{ + if (keep_in_kernel(p)) { + u64 dsq_id = SCX_DSQ_GLOBAL; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to lookup task ctx for %s", p->comm); + return; + } + + if (tctx->force_local) + dsq_id = SCX_DSQ_LOCAL; + tctx->force_local = false; + scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags); + __sync_fetch_and_add(&nr_kernel_enqueues, 1); + return; + } else if (!is_usersched_task(p)) { + enqueue_task_in_user_space(p, enq_flags); + } +} + +void BPF_STRUCT_OPS(userland_dispatch, s32 cpu, struct task_struct *prev) +{ + if (test_and_clear_usersched_needed()) + dispatch_user_scheduler(); + + bpf_repeat(MAX_ENQUEUED_TASKS) { + s32 pid; + struct task_struct *p; + + if (bpf_map_pop_elem(&dispatched, &pid)) + break; + + /* + * The task could have exited by the time we get around to + * dispatching it. Treat this as a normal occurrence, and simply + * move onto the next iteration. + */ + p = bpf_task_from_pid(pid); + if (!p) + continue; + + scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } +} + +/* + * A CPU is about to change its idle state. If the CPU is going idle, ensure + * that the user-space scheduler has a chance to run if there is any remaining + * work to do. + */ +void BPF_STRUCT_OPS(userland_update_idle, s32 cpu, bool idle) +{ + /* + * Don't do anything if we exit from an idle state, a CPU owner will + * be assigned in .running(). 
+ */ + if (!idle) + return; + /* + * A CPU is now available, notify the user-space scheduler that tasks + * can be dispatched, if there is at least one task waiting to be + * scheduled, either queued (accounted in nr_queued) or scheduled + * (accounted in nr_scheduled). + * + * NOTE: nr_queued is incremented by the BPF component, more exactly in + * enqueue(), when a task is sent to the user-space scheduler, then + * the scheduler drains the queued tasks (updating nr_queued) and adds + * them to its internal data structures / state; at this point tasks + * become "scheduled" and the user-space scheduler will take care of + * updating nr_scheduled accordingly; lastly tasks will be dispatched + * and the user-space scheduler will update nr_scheduled again. + * + * Checking both counters allows to determine if there is still some + * pending work to do for the scheduler: new tasks have been queued + * since last check, or there are still tasks "queued" or "scheduled" + * since the previous user-space scheduler run. If the counters are + * both zero it is pointless to wake-up the scheduler (even if a CPU + * becomes idle), because there is nothing to do. + * + * Keep in mind that update_idle() doesn't run concurrently with the + * user-space scheduler (that is single-threaded): this function is + * naturally serialized with the user-space scheduler code, therefore + * this check here is also safe from a concurrency perspective. + */ + if (nr_queued || nr_scheduled) { + /* + * Kick the CPU to make it immediately ready to accept + * dispatched tasks. 
+ */ + set_usersched_needed(); + scx_bpf_kick_cpu(cpu, 0); + } +} + +s32 BPF_STRUCT_OPS(userland_init_task, struct task_struct *p, + struct scx_init_task_args *args) +{ + if (bpf_task_storage_get(&task_ctx_stor, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE)) + return 0; + else + return -ENOMEM; +} + +s32 BPF_STRUCT_OPS(userland_init) +{ + if (num_possible_cpus == 0) { + scx_bpf_error("User scheduler # CPUs uninitialized (%d)", + num_possible_cpus); + return -EINVAL; + } + + if (usersched_pid <= 0) { + scx_bpf_error("User scheduler pid uninitialized (%d)", + usersched_pid); + return -EINVAL; + } + + return 0; +} + +void BPF_STRUCT_OPS(userland_exit, struct scx_exit_info *ei) +{ + UEI_RECORD(uei, ei); +} + +SCX_OPS_DEFINE(userland_ops, + .select_cpu = (void *)userland_select_cpu, + .enqueue = (void *)userland_enqueue, + .dispatch = (void *)userland_dispatch, + .update_idle = (void *)userland_update_idle, + .init_task = (void *)userland_init_task, + .init = (void *)userland_init, + .exit = (void *)userland_exit, + .flags = SCX_OPS_ENQ_LAST | + SCX_OPS_KEEP_BUILTIN_IDLE, + .name = "userland"); diff --git a/tools/sched_ext/scx_userland.c b/tools/sched_ext/scx_userland.c new file mode 100644 index 000000000000..616043c165e6 --- /dev/null +++ b/tools/sched_ext/scx_userland.c @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext user space scheduler which provides vruntime semantics + * using a simple ordered-list implementation. + * + * Each CPU in the system resides in a single, global domain. This precludes + * the need to do any load balancing between domains. The scheduler could + * easily be extended to support multiple domains, with load balancing + * happening in user space. + * + * Any task which has any CPU affinity is scheduled entirely in BPF. This + * program only schedules tasks which may run on any CPU. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#include <stdio.h> +#include <unistd.h> +#include <sched.h> +#include <signal.h> +#include <assert.h> +#include <libgen.h> +#include <pthread.h> +#include <bpf/bpf.h> +#include <sys/mman.h> +#include <sys/queue.h> +#include <sys/syscall.h> + +#include <scx/common.h> +#include "scx_userland.h" +#include "scx_userland.bpf.skel.h" + +const char help_fmt[] = +"A minimal userland sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Try to reduce `sysctl kernel.pid_max` if this program triggers OOMs.\n" +"\n" +"Usage: %s [-b BATCH] [-v]\n" +"\n" +" -b BATCH The number of tasks to batch when dispatching (default: 8)\n" +" -v Print libbpf debug messages\n" +" -h Display this help and exit\n"; + +/* Defined in UAPI */ +#define SCHED_EXT 7 + +/* Number of tasks to batch when dispatching to user space. */ +static __u32 batch_size = 8; + +static bool verbose; +static volatile int exit_req; +static int enqueued_fd, dispatched_fd; + +static pthread_t stats_printer; +static struct scx_userland *skel; +static struct bpf_link *ops_link; + +/* Stats collected in user space. */ +static __u64 nr_vruntime_enqueues, nr_vruntime_dispatches, nr_vruntime_failed; + +/* Number of tasks currently enqueued. */ +static __u64 nr_curr_enqueued; + +/* The data structure containing tasks that are enqueued in user space. */ +struct enqueued_task { + LIST_ENTRY(enqueued_task) entries; + __u64 sum_exec_runtime; + double vruntime; +}; + +/* + * Use a vruntime-sorted list to store tasks. This could easily be extended to + * a more optimal data structure, such as an rbtree as is done in CFS. We + * currently elect to use a sorted list to simplify the example for + * illustrative purposes. + */ +LIST_HEAD(listhead, enqueued_task); + +/* + * A vruntime-sorted list of tasks. The head of the list contains the task with + * the lowest vruntime. 
That is, the task that has the "highest" claim to be + * scheduled. + */ +static struct listhead vruntime_head = LIST_HEAD_INITIALIZER(vruntime_head); + +/* + * The main array of tasks. The array is allocated all at once during + * initialization, based on /proc/sys/kernel/pid_max, to avoid having to + * dynamically allocate memory on the enqueue path, which could cause a + * deadlock. A more substantive user space scheduler could e.g. provide a hook + * for newly enabled tasks that are passed to the scheduler from the + * .prep_enable() callback to allow the scheduler to allocate on safe paths. + */ +struct enqueued_task *tasks; +static int pid_max; + +static double min_vruntime; + +static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) +{ + if (level == LIBBPF_DEBUG && !verbose) + return 0; + return vfprintf(stderr, format, args); +} + +static void sigint_handler(int userland) +{ + exit_req = 1; +} + +static int get_pid_max(void) +{ + FILE *fp; + int pid_max; + + fp = fopen("/proc/sys/kernel/pid_max", "r"); + if (fp == NULL) { + fprintf(stderr, "Error opening /proc/sys/kernel/pid_max\n"); + return -1; + } + if (fscanf(fp, "%d", &pid_max) != 1) { + fprintf(stderr, "Error reading from /proc/sys/kernel/pid_max\n"); + fclose(fp); + return -1; + } + fclose(fp); + + return pid_max; +} + +static int init_tasks(void) +{ + pid_max = get_pid_max(); + if (pid_max < 0) + return pid_max; + + tasks = calloc(pid_max, sizeof(*tasks)); + if (!tasks) { + fprintf(stderr, "Error allocating tasks array\n"); + return -ENOMEM; + } + + return 0; +} + +static __u32 task_pid(const struct enqueued_task *task) +{ + return ((uintptr_t)task - (uintptr_t)tasks) / sizeof(*task); +} + +static int dispatch_task(__s32 pid) +{ + int err; + + err = bpf_map_update_elem(dispatched_fd, NULL, &pid, 0); + if (err) { + __atomic_add_fetch(&nr_vruntime_failed, 1, __ATOMIC_RELAXED); + } else { + __atomic_add_fetch(&nr_vruntime_dispatches, 1, __ATOMIC_RELAXED); + } + + 
return err; +} + +static struct enqueued_task *get_enqueued_task(__s32 pid) +{ + if (pid >= pid_max) + return NULL; + + return &tasks[pid]; +} + +static double calc_vruntime_delta(__u64 weight, __u64 delta) +{ + double weight_f = (double)weight / 100.0; + double delta_f = (double)delta; + + return delta_f / weight_f; +} + +static void update_enqueued(struct enqueued_task *enqueued, const struct scx_userland_enqueued_task *bpf_task) +{ + __u64 delta; + + delta = bpf_task->sum_exec_runtime - enqueued->sum_exec_runtime; + + enqueued->vruntime += calc_vruntime_delta(bpf_task->weight, delta); + if (min_vruntime > enqueued->vruntime) + enqueued->vruntime = min_vruntime; + enqueued->sum_exec_runtime = bpf_task->sum_exec_runtime; +} + +static int vruntime_enqueue(const struct scx_userland_enqueued_task *bpf_task) +{ + struct enqueued_task *curr, *enqueued, *prev; + + curr = get_enqueued_task(bpf_task->pid); + if (!curr) + return ENOENT; + + update_enqueued(curr, bpf_task); + __atomic_add_fetch(&nr_vruntime_enqueues, 1, __ATOMIC_RELAXED); + __atomic_add_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED); + + /* + * Enqueue the task in a vruntime-sorted list. A more optimal data + * structure such as an rbtree could easily be used as well. We elect + * to use a list here simply because it's less code, and thus the + * example is less convoluted and better serves to illustrate what a + * user space scheduler could look like. 
+ */ + + if (LIST_EMPTY(&vruntime_head)) { + LIST_INSERT_HEAD(&vruntime_head, curr, entries); + return 0; + } + + LIST_FOREACH(enqueued, &vruntime_head, entries) { + if (curr->vruntime <= enqueued->vruntime) { + LIST_INSERT_BEFORE(enqueued, curr, entries); + return 0; + } + prev = enqueued; + } + + LIST_INSERT_AFTER(prev, curr, entries); + + return 0; +} + +static void drain_enqueued_map(void) +{ + while (1) { + struct scx_userland_enqueued_task task; + int err; + + if (bpf_map_lookup_and_delete_elem(enqueued_fd, NULL, &task)) { + skel->bss->nr_queued = 0; + skel->bss->nr_scheduled = nr_curr_enqueued; + return; + } + + err = vruntime_enqueue(&task); + if (err) { + fprintf(stderr, "Failed to enqueue task %d: %s\n", + task.pid, strerror(err)); + exit_req = 1; + return; + } + } +} + +static void dispatch_batch(void) +{ + __u32 i; + + for (i = 0; i < batch_size; i++) { + struct enqueued_task *task; + int err; + __s32 pid; + + task = LIST_FIRST(&vruntime_head); + if (!task) + break; + + min_vruntime = task->vruntime; + pid = task_pid(task); + LIST_REMOVE(task, entries); + err = dispatch_task(pid); + if (err) { + /* + * If we fail to dispatch, put the task back to the + * vruntime_head list and stop dispatching additional + * tasks in this batch. 
+ */ + LIST_INSERT_HEAD(&vruntime_head, task, entries); + break; + } + __atomic_sub_fetch(&nr_curr_enqueued, 1, __ATOMIC_RELAXED); + } + skel->bss->nr_scheduled = __atomic_load_n(&nr_curr_enqueued, __ATOMIC_RELAXED); +} + +static void *run_stats_printer(void *arg) +{ + while (!exit_req) { + __u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues, total; + + nr_failed_enqueues = skel->bss->nr_failed_enqueues; + nr_kernel_enqueues = skel->bss->nr_kernel_enqueues; + nr_user_enqueues = skel->bss->nr_user_enqueues; + total = nr_failed_enqueues + nr_kernel_enqueues + nr_user_enqueues; + + printf("o-----------------------o\n"); + printf("| BPF ENQUEUES |\n"); + printf("|-----------------------|\n"); + printf("| kern: %10llu |\n", nr_kernel_enqueues); + printf("| user: %10llu |\n", nr_user_enqueues); + printf("| failed: %10llu |\n", nr_failed_enqueues); + printf("| -------------------- |\n"); + printf("| total: %10llu |\n", total); + printf("| |\n"); + printf("|-----------------------|\n"); + printf("| VRUNTIME / USER |\n"); + printf("|-----------------------|\n"); + printf("| enq: %10llu |\n", __atomic_load_n(&nr_vruntime_enqueues, __ATOMIC_RELAXED)); + printf("| disp: %10llu |\n", __atomic_load_n(&nr_vruntime_dispatches, __ATOMIC_RELAXED)); + printf("| failed: %10llu |\n", __atomic_load_n(&nr_vruntime_failed, __ATOMIC_RELAXED)); + printf("o-----------------------o\n"); + printf("\n\n"); + fflush(stdout); + sleep(1); + } + + return NULL; +} + +static int spawn_stats_thread(void) +{ + return pthread_create(&stats_printer, NULL, run_stats_printer, NULL); +} + +static void pre_bootstrap(int argc, char **argv) +{ + int err; + __u32 opt; + struct sched_param sched_param = { + .sched_priority = sched_get_priority_max(SCHED_EXT), + }; + + err = init_tasks(); + if (err) + exit(err); + + libbpf_set_print(libbpf_print_fn); + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + /* + * Enforce that the user scheduler task is managed by sched_ext. 
The + * task eagerly drains the list of enqueued tasks in its main work + * loop, and then yields the CPU. The BPF scheduler only schedules the + * user space scheduler task when at least one other task in the system + * needs to be scheduled. + */ + err = syscall(__NR_sched_setscheduler, getpid(), SCHED_EXT, &sched_param); + SCX_BUG_ON(err, "Failed to set scheduler to SCHED_EXT"); + + while ((opt = getopt(argc, argv, "b:vh")) != -1) { + switch (opt) { + case 'b': + batch_size = strtoul(optarg, NULL, 0); + break; + case 'v': + verbose = true; + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + exit(opt != 'h'); + } + } + + /* + * It's not always safe to allocate in a user space scheduler, as an + * enqueued task could hold a lock that we require in order to be able + * to allocate. + */ + err = mlockall(MCL_CURRENT | MCL_FUTURE); + SCX_BUG_ON(err, "Failed to prefault and lock address space"); +} + +static void bootstrap(char *comm) +{ + exit_req = 0; + min_vruntime = 0.0; + __atomic_store_n(&nr_vruntime_enqueues, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_vruntime_dispatches, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_vruntime_failed, 0, __ATOMIC_RELAXED); + __atomic_store_n(&nr_curr_enqueued, 0, __ATOMIC_RELAXED); + memset(tasks, 0, pid_max * sizeof(*tasks)); + LIST_INIT(&vruntime_head); + + skel = SCX_OPS_OPEN(userland_ops, scx_userland); + + skel->rodata->num_possible_cpus = libbpf_num_possible_cpus(); + assert(skel->rodata->num_possible_cpus > 0); + skel->rodata->usersched_pid = getpid(); + assert(skel->rodata->usersched_pid > 0); + + SCX_OPS_LOAD(skel, userland_ops, scx_userland, uei); + + enqueued_fd = bpf_map__fd(skel->maps.enqueued); + dispatched_fd = bpf_map__fd(skel->maps.dispatched); + assert(enqueued_fd > 0); + assert(dispatched_fd > 0); + + SCX_BUG_ON(spawn_stats_thread(), "Failed to spawn stats thread"); + + ops_link = SCX_OPS_ATTACH(skel, userland_ops, scx_userland); +} + +static void sched_main_loop(void) +{ + while (!exit_req) 
{ + /* + * Perform the following work in the main user space scheduler + * loop: + * + * 1. Drain all tasks from the enqueued map, and enqueue them + * to the vruntime sorted list. + * + * 2. Dispatch a batch of tasks from the vruntime sorted list + * down to the kernel. + * + * 3. Yield the CPU back to the system. The BPF scheduler will + * reschedule the user space scheduler once another task has + * been enqueued to user space. + */ + drain_enqueued_map(); + dispatch_batch(); + sched_yield(); + } +} + +int main(int argc, char **argv) +{ + __u64 ecode; + + pre_bootstrap(argc, argv); +restart: + bootstrap(argv[0]); + sched_main_loop(); + + exit_req = 1; + bpf_link__destroy(ops_link); + pthread_join(stats_printer, NULL); + ecode = UEI_REPORT(skel, uei); + scx_userland__destroy(skel); + + if (UEI_ECODE_RESTART(ecode)) + goto restart; + return 0; +} diff --git a/tools/sched_ext/scx_userland.h b/tools/sched_ext/scx_userland.h new file mode 100644 index 000000000000..684fb2dd5de9 --- /dev/null +++ b/tools/sched_ext/scx_userland.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta, Inc */ + +#ifndef __SCX_USERLAND_COMMON_H +#define __SCX_USERLAND_COMMON_H + +/* + * An instance of a task that has been enqueued by the kernel for consumption + * by a user space global scheduler thread. + */ +struct scx_userland_enqueued_task { + __s32 pid; + u64 sum_exec_runtime; + u64 weight; +}; + +#endif // __SCX_USERLAND_COMMON_H |
