Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/arm/include/uapi/asm/kvm.h | 1
-rw-r--r--  tools/arch/arm64/include/asm/sysreg.h | 30
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 12
-rw-r--r--  tools/arch/x86/include/asm/amd-ibs.h | 3
-rw-r--r--  tools/arch/x86/include/asm/asm.h | 8
-rw-r--r--  tools/arch/x86/include/asm/cpufeatures.h | 9
-rw-r--r--  tools/arch/x86/include/asm/disabled-features.h | 161
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 2
-rw-r--r--  tools/arch/x86/include/asm/nops.h | 2
-rw-r--r--  tools/arch/x86/include/asm/orc_types.h | 4
-rw-r--r--  tools/arch/x86/include/asm/pvclock-abi.h | 4
-rw-r--r--  tools/arch/x86/include/asm/required-features.h | 105
-rw-r--r--  tools/include/linux/bits.h | 2
-rw-r--r--  tools/include/nolibc/Makefile | 2
-rw-r--r--  tools/include/nolibc/arch-mips.h | 1
-rw-r--r--  tools/include/nolibc/arch-s390.h | 9
-rw-r--r--  tools/include/nolibc/arch.h | 2
-rw-r--r--  tools/include/nolibc/crt.h | 2
-rw-r--r--  tools/include/nolibc/dirent.h | 98
-rw-r--r--  tools/include/nolibc/errno.h | 2
-rw-r--r--  tools/include/nolibc/limits.h | 7
-rw-r--r--  tools/include/nolibc/nolibc.h | 4
-rw-r--r--  tools/include/nolibc/signal.h | 1
-rw-r--r--  tools/include/nolibc/stackprotector.h | 2
-rw-r--r--  tools/include/nolibc/stdio.h | 98
-rw-r--r--  tools/include/nolibc/stdlib.h | 1
-rw-r--r--  tools/include/nolibc/string.h | 4
-rw-r--r--  tools/include/nolibc/sys.h | 83
-rw-r--r--  tools/include/uapi/linux/const.h | 2
-rw-r--r--  tools/include/uapi/linux/elf.h | 524
-rw-r--r--  tools/memory-model/Documentation/glossary.txt | 32
-rw-r--r--  tools/memory-model/Documentation/herd-representation.txt | 49
-rw-r--r--  tools/memory-model/README | 4
-rw-r--r--  tools/memory-model/linux-kernel.bell | 33
-rw-r--r--  tools/memory-model/linux-kernel.cat | 10
-rw-r--r--  tools/memory-model/linux-kernel.cfg | 1
-rw-r--r--  tools/memory-model/linux-kernel.def | 169
-rw-r--r--  tools/objtool/Documentation/objtool.txt | 105
-rw-r--r--  tools/objtool/Makefile | 2
-rw-r--r--  tools/objtool/arch/loongarch/decode.c | 28
-rw-r--r--  tools/objtool/arch/loongarch/include/arch/elf.h | 7
-rw-r--r--  tools/objtool/arch/loongarch/special.c | 159
-rw-r--r--  tools/objtool/arch/powerpc/decode.c | 14
-rw-r--r--  tools/objtool/arch/x86/decode.c | 14
-rw-r--r--  tools/objtool/builtin-check.c | 208
-rw-r--r--  tools/objtool/check.c | 95
-rw-r--r--  tools/objtool/elf.c | 3
-rw-r--r--  tools/objtool/include/objtool/arch.h | 3
-rw-r--r--  tools/objtool/include/objtool/builtin.h | 3
-rw-r--r--  tools/objtool/include/objtool/elf.h | 2
-rw-r--r--  tools/objtool/include/objtool/warn.h | 20
-rw-r--r--  tools/objtool/noreturns.h | 2
-rw-r--r--  tools/objtool/objtool.c | 78
-rw-r--r--  tools/objtool/orc_dump.c | 7
-rwxr-xr-x  tools/perf/check-headers.sh | 2
-rw-r--r--  tools/power/cpupower/Makefile | 19
-rw-r--r--  tools/power/cpupower/bench/parse.c | 4
-rw-r--r--  tools/power/cpupower/lib/cpupower.c | 48
-rw-r--r--  tools/power/cpupower/lib/cpupower.h | 3
-rw-r--r--  tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c | 48
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 2
-rw-r--r--  tools/sched_ext/include/scx/common.bpf.h | 34
-rw-r--r--  tools/sched_ext/include/scx/common.h | 1
-rw-r--r--  tools/sched_ext/include/scx/compat.bpf.h | 95
-rw-r--r--  tools/sched_ext/include/scx/compat.h | 16
-rw-r--r--  tools/sched_ext/include/scx/enum_defs.autogen.h | 120
-rw-r--r--  tools/sched_ext/scx_central.c | 15
-rw-r--r--  tools/sched_ext/scx_qmap.bpf.c | 23
-rw-r--r--  tools/testing/selftests/Makefile | 1
-rw-r--r--  tools/testing/selftests/arm64/fp/kernel-test.c | 1
-rw-r--r--  tools/testing/selftests/arm64/mte/check_hugetlb_options.c | 19
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/.gitignore | 2
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/Makefile | 6
-rw-r--r--  tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c | 516
-rw-r--r--  tools/testing/selftests/filesystems/nsfs/iterate_mntns.c | 14
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/Makefile | 11
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c | 507
-rw-r--r--  tools/testing/selftests/filesystems/overlayfs/wrappers.h | 17
-rw-r--r--  tools/testing/selftests/filesystems/statmount/statmount.h | 2
-rw-r--r--  tools/testing/selftests/filesystems/utils.c | 501
-rw-r--r--  tools/testing/selftests/filesystems/utils.h | 45
-rw-r--r--  tools/testing/selftests/kselftest.h | 5
-rwxr-xr-x  tools/testing/selftests/kselftest/module.sh | 2
-rw-r--r--  tools/testing/selftests/kvm/Makefile.kvm | 1
-rw-r--r--  tools/testing/selftests/kvm/access_tracking_perf_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/arm64/get-reg-list.c | 1
-rw-r--r--  tools/testing/selftests/kvm/arm64/hypercalls.c | 46
-rw-r--r--  tools/testing/selftests/kvm/arm64/set_id_regs.c | 40
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_test.c | 523
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 33
-rw-r--r--  tools/testing/selftests/kvm/include/test_util.h | 2
-rw-r--r--  tools/testing/selftests/kvm/include/x86/processor.h | 50
-rw-r--r--  tools/testing/selftests/kvm/kvm_create_max_vcpus.c | 28
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 114
-rw-r--r--  tools/testing/selftests/kvm/lib/userfaultfd_util.c | 2
-rw-r--r--  tools/testing/selftests/kvm/riscv/sbi_pmu_test.c | 81
-rw-r--r--  tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c | 6
-rw-r--r--  tools/testing/selftests/kvm/x86/hyperv_ipi.c | 6
-rw-r--r--  tools/testing/selftests/kvm/x86/nested_emulation_test.c | 146
-rw-r--r--  tools/testing/selftests/kvm/x86/nx_huge_pages_test.c | 4
-rw-r--r--  tools/testing/selftests/kvm/x86/pmu_counters_test.c | 158
-rw-r--r--  tools/testing/selftests/kvm/x86/svm_int_ctl_test.c | 5
-rw-r--r--  tools/testing/selftests/kvm/x86/ucna_injection_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86/xapic_ipi_test.c | 16
-rw-r--r--  tools/testing/selftests/kvm/x86/xapic_state_test.c | 4
-rw-r--r--  tools/testing/selftests/kvm/x86/xen_shinfo_test.c | 5
-rw-r--r--  tools/testing/selftests/lib.mk | 5
-rw-r--r--  tools/testing/selftests/lib/Makefile | 2
-rw-r--r--  tools/testing/selftests/lib/config | 3
-rwxr-xr-x  tools/testing/selftests/lib/prime_numbers.sh | 4
-rwxr-xr-x  tools/testing/selftests/lib/printf.sh | 4
-rwxr-xr-x  tools/testing/selftests/lib/scanf.sh | 4
-rw-r--r--  tools/testing/selftests/mm/guard-pages.c | 16
-rw-r--r--  tools/testing/selftests/mm/mseal_test.c | 6
-rw-r--r--  tools/testing/selftests/mm/pkey-helpers.h | 3
-rw-r--r--  tools/testing/selftests/mm/pkey_sighandler_tests.c | 4
-rw-r--r--  tools/testing/selftests/mm/protection_keys.c | 2
-rw-r--r--  tools/testing/selftests/mount_setattr/mount_setattr_test.c | 652
-rw-r--r--  tools/testing/selftests/nolibc/Makefile | 30
-rw-r--r--  tools/testing/selftests/nolibc/nolibc-test-linkage.c | 6
-rw-r--r--  tools/testing/selftests/nolibc/nolibc-test.c | 138
-rwxr-xr-x  tools/testing/selftests/nolibc/run-tests.sh | 26
-rw-r--r--  tools/testing/selftests/pidfd/.gitignore | 2
-rw-r--r--  tools/testing/selftests/pidfd/Makefile | 4
-rw-r--r--  tools/testing/selftests/pidfd/pidfd.h | 109
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_exec_helper.c | 12
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_fdinfo_test.c | 1
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_info_test.c | 692
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_open_test.c | 30
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_setns_test.c | 45
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_test.c | 76
-rw-r--r--  tools/testing/selftests/powerpc/include/pkeys.h | 5
-rw-r--r--  tools/testing/selftests/powerpc/mm/pkey_exec_prot.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/mm/pkey_siginfo.c | 2
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/core-pkey.c | 6
-rw-r--r--  tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c | 6
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh | 2
-rw-r--r--  tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot | 1
-rw-r--r--  tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot | 6
-rw-r--r--  tools/testing/selftests/rcutorture/configs/rcu/TREE07 | 3
-rw-r--r--  tools/testing/selftests/rcutorture/configs/rcu/TREE10 | 3
-rw-r--r--  tools/testing/selftests/rseq/.gitignore | 1
-rw-r--r--  tools/testing/selftests/rseq/Makefile | 9
-rw-r--r--  tools/testing/selftests/rseq/rseq.c | 27
-rw-r--r--  tools/testing/selftests/rseq/rseq.h | 5
-rwxr-xr-x  tools/testing/selftests/rseq/run_syscall_errors_test.sh | 5
-rw-r--r--  tools/testing/selftests/rseq/syscall_errors_test.c | 124
-rw-r--r--  tools/testing/selftests/sched/config | 2
-rw-r--r--  tools/testing/selftests/sched_ext/Makefile | 1
-rw-r--r--  tools/testing/selftests/sched_ext/config | 1
-rw-r--r--  tools/testing/selftests/sched_ext/numa.bpf.c | 100
-rw-r--r--  tools/testing/selftests/sched_ext/numa.c | 59
-rw-r--r--  tools/testing/selftests/seccomp/seccomp_bpf.c | 6
-rw-r--r--  tools/testing/selftests/timers/posix_timers.c | 73
-rw-r--r--  tools/testing/selftests/timers/skew_consistency.c | 2
-rw-r--r--  tools/testing/selftests/vDSO/Makefile | 11
-rw-r--r--  tools/testing/selftests/vDSO/parse_vdso.c | 19
-rw-r--r--  tools/testing/selftests/vDSO/parse_vdso.h | 1
-rw-r--r--  tools/testing/selftests/vDSO/vdso_standalone_test_x86.c | 150
-rw-r--r--  tools/testing/selftests/vDSO/vdso_test_gettimeofday.c | 4
-rw-r--r--  tools/testing/selftests/wireguard/qemu/debug.config | 1
-rw-r--r--  tools/testing/selftests/x86/Makefile | 6
-rw-r--r--  tools/testing/selftests/x86/amx.c | 442
-rw-r--r--  tools/testing/selftests/x86/avx.c | 12
-rw-r--r--  tools/testing/selftests/x86/corrupt_xstate_header.c | 14
-rw-r--r--  tools/testing/selftests/x86/entry_from_vm86.c | 24
-rw-r--r--  tools/testing/selftests/x86/fsgsbase.c | 24
-rw-r--r--  tools/testing/selftests/x86/helpers.h | 28
-rw-r--r--  tools/testing/selftests/x86/ioperm.c | 25
-rw-r--r--  tools/testing/selftests/x86/iopl.c | 25
-rw-r--r--  tools/testing/selftests/x86/lam.c | 151
-rw-r--r--  tools/testing/selftests/x86/ldt_gdt.c | 18
-rw-r--r--  tools/testing/selftests/x86/mov_ss_trap.c | 14
-rw-r--r--  tools/testing/selftests/x86/ptrace_syscall.c | 24
-rw-r--r--  tools/testing/selftests/x86/sigaltstack.c | 26
-rw-r--r--  tools/testing/selftests/x86/sigreturn.c | 24
-rw-r--r--  tools/testing/selftests/x86/single_step_syscall.c | 22
-rw-r--r--  tools/testing/selftests/x86/syscall_arg_fault.c | 12
-rw-r--r--  tools/testing/selftests/x86/syscall_nt.c | 12
-rw-r--r--  tools/testing/selftests/x86/syscall_numbering.c | 3
-rw-r--r--  tools/testing/selftests/x86/sysret_rip.c | 24
-rw-r--r--  tools/testing/selftests/x86/test_vsyscall.c | 13
-rw-r--r--  tools/testing/selftests/x86/unwind_vdso.c | 12
-rw-r--r--  tools/testing/selftests/x86/xstate.c | 477
-rw-r--r--  tools/testing/selftests/x86/xstate.h | 195
185 files changed, 7435 insertions(+), 2292 deletions(-)
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 03cd7c19a683..d5dd96902817 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -246,6 +246,7 @@ struct kvm_vcpu_events {
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
+#define KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ 9
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index 150416682e2c..b6c5ece4fdee 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -558,9 +558,6 @@
#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
-#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
-#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
-#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
@@ -981,10 +978,6 @@
#define SYS_MPIDR_SAFE_VAL (BIT(31))
/* GIC Hypervisor interface registers */
-/* ICH_MISR_EL2 bit definitions */
-#define ICH_MISR_EOI (1 << 0)
-#define ICH_MISR_U (1 << 1)
-
/* ICH_LR*_EL2 bit definitions */
#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
@@ -999,17 +992,6 @@
#define ICH_LR_PRIORITY_SHIFT 48
#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
-/* ICH_HCR_EL2 bit definitions */
-#define ICH_HCR_EN (1 << 0)
-#define ICH_HCR_UIE (1 << 1)
-#define ICH_HCR_NPIE (1 << 3)
-#define ICH_HCR_TC (1 << 10)
-#define ICH_HCR_TALL0 (1 << 11)
-#define ICH_HCR_TALL1 (1 << 12)
-#define ICH_HCR_TDIR (1 << 14)
-#define ICH_HCR_EOIcount_SHIFT 27
-#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
-
/* ICH_VMCR_EL2 bit definitions */
#define ICH_VMCR_ACK_CTL_SHIFT 2
#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
@@ -1030,18 +1012,6 @@
#define ICH_VMCR_ENG1_SHIFT 1
#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
-/* ICH_VTR_EL2 bit definitions */
-#define ICH_VTR_PRI_BITS_SHIFT 29
-#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
-#define ICH_VTR_ID_BITS_SHIFT 23
-#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
-#define ICH_VTR_SEIS_SHIFT 22
-#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
-#define ICH_VTR_A3V_SHIFT 21
-#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
-#define ICH_VTR_TDS_SHIFT 19
-#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
-
/*
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 66736ff04011..6d44f8c8a18f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -374,6 +374,7 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 0-63 */
#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
enum {
@@ -384,6 +385,17 @@ enum {
#endif
};
+/* Vendor hyper call function numbers 64-127 */
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2 KVM_REG_ARM_FW_FEAT_BMAP_REG(3)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_VER = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_DISCOVER_IMPL_CPUS = 1,
+#ifdef __KERNEL__
+ KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT,
+#endif
+};
+
/* Device Control API on vm fd */
#define KVM_ARM_VM_SMCCC_CTRL 0
#define KVM_ARM_VM_SMCCC_FILTER 0
diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd-ibs.h
index 93807b437e4d..cb1740bc3da2 100644
--- a/tools/arch/x86/include/asm/amd-ibs.h
+++ b/tools/arch/x86/include/asm/amd-ibs.h
@@ -64,7 +64,8 @@ union ibs_op_ctl {
opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
reserved0:5, /* 27-31: reserved */
opcurcnt:27, /* 32-58: periodic op counter current count */
- reserved1:5; /* 59-63: reserved */
+ ldlat_thrsh:4, /* 59-62: Load Latency threshold */
+ ldlat_en:1; /* 63: Load Latency enabled */
};
};
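For illustration only (not part of the commit): a hedged sketch of decoding the two new bitfields from a raw IBS_OP_CTL value, assuming the union also exposes the raw 64-bit word as a 'val' member as in the in-kernel copy of this header.

#include <stdio.h>
#include "asm/amd-ibs.h"	/* assumed include path for the header above */

static void show_ldlat(unsigned long long raw)
{
	union ibs_op_ctl ctl = { .val = raw };	/* 'val' member assumed */

	if (ctl.ldlat_en)	/* bit 63: Load Latency enabled */
		printf("load latency threshold: %u\n", (unsigned int)ctl.ldlat_thrsh);
}
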
diff --git a/tools/arch/x86/include/asm/asm.h b/tools/arch/x86/include/asm/asm.h
index 3ad3da9a7d97..dbe39b44256b 100644
--- a/tools/arch/x86/include/asm/asm.h
+++ b/tools/arch/x86/include/asm/asm.h
@@ -2,7 +2,7 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define __ASM_FORM(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
@@ -123,7 +123,7 @@
#ifdef __KERNEL__
/* Exception table entry */
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
.pushsection "__ex_table","a" ; \
.balign 4 ; \
@@ -154,7 +154,7 @@
# define _ASM_NOKPROBE(entry)
# endif
-#else /* ! __ASSEMBLY__ */
+#else /* ! __ASSEMBLER__ */
# define _EXPAND_EXTABLE_HANDLE(x) #x
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
" .pushsection \"__ex_table\",\"a\"\n" \
@@ -186,7 +186,7 @@
*/
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* __KERNEL__ */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 17b6590748c0..9e3fa7942e7d 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -2,14 +2,6 @@
#ifndef _ASM_X86_CPUFEATURES_H
#define _ASM_X86_CPUFEATURES_H
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#include <asm/required-features.h>
-#endif
-
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#include <asm/disabled-features.h>
-#endif
-
/*
* Defines x86 CPU feature bits
*/
@@ -210,7 +202,6 @@
#define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */
-#define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
deleted file mode 100644
index c492bdc97b05..000000000000
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#define _ASM_X86_DISABLED_FEATURES_H
-
-/* These features, although they might be available in a CPU
- * will not be used because the compile options to support
- * them are not present.
- *
- * This code allows them to be checked and disabled at
- * compile time without an explicit #ifdef. Use
- * cpu_feature_enabled().
- */
-
-#ifdef CONFIG_X86_UMIP
-# define DISABLE_UMIP 0
-#else
-# define DISABLE_UMIP (1<<(X86_FEATURE_UMIP & 31))
-#endif
-
-#ifdef CONFIG_X86_64
-# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
-# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
-# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
-# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
-# define DISABLE_PCID 0
-#else
-# define DISABLE_VME 0
-# define DISABLE_K6_MTRR 0
-# define DISABLE_CYRIX_ARR 0
-# define DISABLE_CENTAUR_MCR 0
-# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
-#endif /* CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-# define DISABLE_PKU 0
-# define DISABLE_OSPKE 0
-#else
-# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
-# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
-#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
-
-#ifdef CONFIG_X86_5LEVEL
-# define DISABLE_LA57 0
-#else
-# define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
-# define DISABLE_PTI 0
-#else
-# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_RETPOLINE
-# define DISABLE_RETPOLINE 0
-#else
-# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
- (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
-#endif
-
-#ifdef CONFIG_MITIGATION_RETHUNK
-# define DISABLE_RETHUNK 0
-#else
-# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_UNRET_ENTRY
-# define DISABLE_UNRET 0
-#else
-# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
-#endif
-
-#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
-# define DISABLE_CALL_DEPTH_TRACKING 0
-#else
-# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31))
-#endif
-
-#ifdef CONFIG_ADDRESS_MASKING
-# define DISABLE_LAM 0
-#else
-# define DISABLE_LAM (1 << (X86_FEATURE_LAM & 31))
-#endif
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
-
-#ifdef CONFIG_X86_SGX
-# define DISABLE_SGX 0
-#else
-# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
-#endif
-
-#ifdef CONFIG_XEN_PV
-# define DISABLE_XENPV 0
-#else
-# define DISABLE_XENPV (1 << (X86_FEATURE_XENPV & 31))
-#endif
-
-#ifdef CONFIG_INTEL_TDX_GUEST
-# define DISABLE_TDX_GUEST 0
-#else
-# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
-#endif
-
-#ifdef CONFIG_X86_USER_SHADOW_STACK
-#define DISABLE_USER_SHSTK 0
-#else
-#define DISABLE_USER_SHSTK (1 << (X86_FEATURE_USER_SHSTK & 31))
-#endif
-
-#ifdef CONFIG_X86_KERNEL_IBT
-#define DISABLE_IBT 0
-#else
-#define DISABLE_IBT (1 << (X86_FEATURE_IBT & 31))
-#endif
-
-#ifdef CONFIG_X86_FRED
-# define DISABLE_FRED 0
-#else
-# define DISABLE_FRED (1 << (X86_FEATURE_FRED & 31))
-#endif
-
-#ifdef CONFIG_KVM_AMD_SEV
-#define DISABLE_SEV_SNP 0
-#else
-#define DISABLE_SEV_SNP (1 << (X86_FEATURE_SEV_SNP & 31))
-#endif
-
-/*
- * Make sure to add features to the correct mask
- */
-#define DISABLED_MASK0 (DISABLE_VME)
-#define DISABLED_MASK1 0
-#define DISABLED_MASK2 0
-#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 (DISABLE_PCID)
-#define DISABLED_MASK5 0
-#define DISABLED_MASK6 0
-#define DISABLED_MASK7 (DISABLE_PTI)
-#define DISABLED_MASK8 (DISABLE_XENPV|DISABLE_TDX_GUEST)
-#define DISABLED_MASK9 (DISABLE_SGX)
-#define DISABLED_MASK10 0
-#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
- DISABLE_CALL_DEPTH_TRACKING|DISABLE_USER_SHSTK)
-#define DISABLED_MASK12 (DISABLE_FRED|DISABLE_LAM)
-#define DISABLED_MASK13 0
-#define DISABLED_MASK14 0
-#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
- DISABLE_ENQCMD)
-#define DISABLED_MASK17 0
-#define DISABLED_MASK18 (DISABLE_IBT)
-#define DISABLED_MASK19 (DISABLE_SEV_SNP)
-#define DISABLED_MASK20 0
-#define DISABLED_MASK21 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
-
-#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 3ae84c3b8e6d..dc1c1057f26e 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -25,6 +25,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_TCE 15 /* Enable Translation Cache Extensions */
#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
@@ -34,6 +35,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_TCE (1<<_EFER_TCE)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/*
diff --git a/tools/arch/x86/include/asm/nops.h b/tools/arch/x86/include/asm/nops.h
index 1c1b7550fa55..cd94221d8335 100644
--- a/tools/arch/x86/include/asm/nops.h
+++ b/tools/arch/x86/include/asm/nops.h
@@ -82,7 +82,7 @@
#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern const unsigned char * const x86_nops[];
#endif
diff --git a/tools/arch/x86/include/asm/orc_types.h b/tools/arch/x86/include/asm/orc_types.h
index 46d7e06763c9..e0125afa53fb 100644
--- a/tools/arch/x86/include/asm/orc_types.h
+++ b/tools/arch/x86/include/asm/orc_types.h
@@ -45,7 +45,7 @@
#define ORC_TYPE_REGS 3
#define ORC_TYPE_REGS_PARTIAL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
#include <asm/byteorder.h>
/*
@@ -73,6 +73,6 @@ struct orc_entry {
#endif
} __packed;
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ORC_TYPES_H */
diff --git a/tools/arch/x86/include/asm/pvclock-abi.h b/tools/arch/x86/include/asm/pvclock-abi.h
index 1436226efe3e..b9fece5fc96d 100644
--- a/tools/arch/x86/include/asm/pvclock-abi.h
+++ b/tools/arch/x86/include/asm/pvclock-abi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PVCLOCK_ABI_H
#define _ASM_X86_PVCLOCK_ABI_H
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
/*
* These structs MUST NOT be changed.
@@ -44,5 +44,5 @@ struct pvclock_wall_clock {
#define PVCLOCK_GUEST_STOPPED (1 << 1)
/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLER__ */
#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
deleted file mode 100644
index e9187ddd3d1f..000000000000
--- a/tools/arch/x86/include/asm/required-features.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#define _ASM_X86_REQUIRED_FEATURES_H
-
-/* Define minimum CPUID feature set for kernel These bits are checked
- really early to actually display a visible error message before the
- kernel dies. Make sure to assign features to the proper mask!
-
- Some requirements that are not in CPUID yet are also in the
- CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
-
- The real information is in arch/x86/Kconfig.cpu, this just converts
- the CONFIGs into a bitmask */
-
-#ifndef CONFIG_MATH_EMULATION
-# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
-#else
-# define NEED_FPU 0
-#endif
-
-#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
-#else
-# define NEED_PAE 0
-#endif
-
-#ifdef CONFIG_X86_CMPXCHG64
-# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
-#else
-# define NEED_CX8 0
-#endif
-
-#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
-# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
-#else
-# define NEED_CMOV 0
-#endif
-
-# define NEED_3DNOW 0
-
-#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
-# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
-#else
-# define NEED_NOPL 0
-#endif
-
-#ifdef CONFIG_MATOM
-# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
-#else
-# define NEED_MOVBE 0
-#endif
-
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT_XXL
-/* Paravirtualized systems may not have PSE or PGE available */
-#define NEED_PSE 0
-#define NEED_PGE 0
-#else
-#define NEED_PSE (1<<(X86_FEATURE_PSE) & 31)
-#define NEED_PGE (1<<(X86_FEATURE_PGE) & 31)
-#endif
-#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
-#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
-#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
-#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
-#define NEED_LM (1<<(X86_FEATURE_LM & 31))
-#else
-#define NEED_PSE 0
-#define NEED_MSR 0
-#define NEED_PGE 0
-#define NEED_FXSR 0
-#define NEED_XMM 0
-#define NEED_XMM2 0
-#define NEED_LM 0
-#endif
-
-#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
- NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
- NEED_XMM|NEED_XMM2)
-#define SSE_MASK (NEED_XMM|NEED_XMM2)
-
-#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
-
-#define REQUIRED_MASK2 0
-#define REQUIRED_MASK3 (NEED_NOPL)
-#define REQUIRED_MASK4 (NEED_MOVBE)
-#define REQUIRED_MASK5 0
-#define REQUIRED_MASK6 0
-#define REQUIRED_MASK7 0
-#define REQUIRED_MASK8 0
-#define REQUIRED_MASK9 0
-#define REQUIRED_MASK10 0
-#define REQUIRED_MASK11 0
-#define REQUIRED_MASK12 0
-#define REQUIRED_MASK13 0
-#define REQUIRED_MASK14 0
-#define REQUIRED_MASK15 0
-#define REQUIRED_MASK16 0
-#define REQUIRED_MASK17 0
-#define REQUIRED_MASK18 0
-#define REQUIRED_MASK19 0
-#define REQUIRED_MASK20 0
-#define REQUIRED_MASK21 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
-
-#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 60044b608817..8de2914e6510 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -41,7 +41,7 @@
* Missing asm support
*
* __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __init128' data
+ * in the asm code, as it shifts an 'unsigned __int128' data
* type instead of direct representation of 128 bit constants
* such as long and unsigned long. The fundamental problem is
* that a 128 bit constant will get silently truncated by the
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
index a1f55fb24bb3..f9702877ac21 100644
--- a/tools/include/nolibc/Makefile
+++ b/tools/include/nolibc/Makefile
@@ -29,7 +29,9 @@ all_files := \
compiler.h \
crt.h \
ctype.h \
+ dirent.h \
errno.h \
+ limits.h \
nolibc.h \
signal.h \
stackprotector.h \
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
index 1791a8ce58da..753a8ed2cf69 100644
--- a/tools/include/nolibc/arch-mips.h
+++ b/tools/include/nolibc/arch-mips.h
@@ -179,6 +179,7 @@
})
/* startup code, note that it's called __start on MIPS */
+void __start(void);
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector __start(void)
{
__asm__ volatile (
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
index f9ab83a219b8..df4c3cc713ac 100644
--- a/tools/include/nolibc/arch-s390.h
+++ b/tools/include/nolibc/arch-s390.h
@@ -5,8 +5,8 @@
#ifndef _NOLIBC_ARCH_S390_H
#define _NOLIBC_ARCH_S390_H
-#include <asm/signal.h>
-#include <asm/unistd.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
#include "compiler.h"
#include "crt.h"
@@ -143,8 +143,13 @@
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
__asm__ volatile (
+#ifdef __s390x__
"lgr %r2, %r15\n" /* save stack pointer to %r2, as arg1 of _start_c */
"aghi %r15, -160\n" /* allocate new stackframe */
+#else
+ "lr %r2, %r15\n"
+ "ahi %r15, -96\n"
+#endif
"xc 0(8,%r15), 0(%r15)\n" /* clear backchain */
"brasl %r14, _start_c\n" /* transfer to c runtime */
);
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
index c8f4e5d3add9..8a2c143c0fba 100644
--- a/tools/include/nolibc/arch.h
+++ b/tools/include/nolibc/arch.h
@@ -29,7 +29,7 @@
#include "arch-powerpc.h"
#elif defined(__riscv)
#include "arch-riscv.h"
-#elif defined(__s390x__)
+#elif defined(__s390x__) || defined(__s390__)
#include "arch-s390.h"
#elif defined(__loongarch__)
#include "arch-loongarch.h"
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index bbcd5fd09806..c4b10103bbec 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -10,6 +10,7 @@
char **environ __attribute__((weak));
const unsigned long *_auxv __attribute__((weak));
+void _start(void);
static void __stack_chk_init(void);
static void exit(int);
@@ -22,6 +23,7 @@ extern void (*const __init_array_end[])(int, char **, char**) __attribute__((wea
extern void (*const __fini_array_start[])(void) __attribute__((weak));
extern void (*const __fini_array_end[])(void) __attribute__((weak));
+void _start_c(long *sp);
__attribute__((weak,used))
void _start_c(long *sp)
{
diff --git a/tools/include/nolibc/dirent.h b/tools/include/nolibc/dirent.h
new file mode 100644
index 000000000000..c5c30d0dd680
--- /dev/null
+++ b/tools/include/nolibc/dirent.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Directory access for NOLIBC
+ * Copyright (C) 2025 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#ifndef _NOLIBC_DIRENT_H
+#define _NOLIBC_DIRENT_H
+
+#include "stdint.h"
+#include "types.h"
+
+#include <linux/limits.h>
+
+struct dirent {
+ ino_t d_ino;
+ char d_name[NAME_MAX + 1];
+};
+
+/* See comment of FILE in stdio.h */
+typedef struct {
+ char dummy[1];
+} DIR;
+
+static __attribute__((unused))
+DIR *fdopendir(int fd)
+{
+ if (fd < 0) {
+ SET_ERRNO(EBADF);
+ return NULL;
+ }
+ return (DIR *)(intptr_t)~fd;
+}
+
+static __attribute__((unused))
+DIR *opendir(const char *name)
+{
+ int fd;
+
+ fd = open(name, O_RDONLY);
+ if (fd == -1)
+ return NULL;
+ return fdopendir(fd);
+}
+
+static __attribute__((unused))
+int closedir(DIR *dirp)
+{
+ intptr_t i = (intptr_t)dirp;
+
+ if (i >= 0) {
+ SET_ERRNO(EBADF);
+ return -1;
+ }
+ return close(~i);
+}
+
+static __attribute__((unused))
+int readdir_r(DIR *dirp, struct dirent *entry, struct dirent **result)
+{
+ char buf[sizeof(struct linux_dirent64) + NAME_MAX + 1];
+ struct linux_dirent64 *ldir = (void *)buf;
+ intptr_t i = (intptr_t)dirp;
+ int fd, ret;
+
+ if (i >= 0)
+ return EBADF;
+
+ fd = ~i;
+
+ ret = sys_getdents64(fd, ldir, sizeof(buf));
+ if (ret < 0)
+ return -ret;
+ if (ret == 0) {
+ *result = NULL;
+ return 0;
+ }
+
+ /*
+ * getdents64() returns as many entries as fit the buffer.
+ * readdir() can only return one entry at a time.
+ * Make sure the non-returned ones are not skipped.
+ */
+ ret = lseek(fd, ldir->d_off, SEEK_SET);
+ if (ret == -1)
+ return errno;
+
+ entry->d_ino = ldir->d_ino;
+ /* the destination should always be big enough */
+ strlcpy(entry->d_name, ldir->d_name, sizeof(entry->d_name));
+ *result = entry;
+ return 0;
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_DIRENT_H */
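A hypothetical caller, not part of the commit, showing how the new API fits together: opendir() stores the open descriptor as ~fd inside the DIR pointer, and readdir_r() returns 0 with *result set to NULL once the directory is exhausted (assuming a nolibc build where <dirent.h> resolves to the header above).

#include <dirent.h>
#include <stdio.h>

static int list_dir(const char *path)
{
	struct dirent entry, *result;
	DIR *dir = opendir(path);

	if (!dir)
		return -1;
	/* a zero return with *result == NULL signals end-of-directory */
	while (readdir_r(dir, &entry, &result) == 0 && result)
		printf("%s\n", entry.d_name);
	return closedir(dir);
}
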
diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
index a44486ff0477..1d8d8033e8ff 100644
--- a/tools/include/nolibc/errno.h
+++ b/tools/include/nolibc/errno.h
@@ -7,7 +7,7 @@
#ifndef _NOLIBC_ERRNO_H
#define _NOLIBC_ERRNO_H
-#include <asm/errno.h>
+#include <linux/errno.h>
#ifndef NOLIBC_IGNORE_ERRNO
#define SET_ERRNO(v) do { errno = (v); } while (0)
diff --git a/tools/include/nolibc/limits.h b/tools/include/nolibc/limits.h
new file mode 100644
index 000000000000..306d4141f4d2
--- /dev/null
+++ b/tools/include/nolibc/limits.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Shim limits.h header for NOLIBC.
+ * Copyright (C) 2025 Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+ */
+
+#include "nolibc.h"
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index 92436b1e4441..70872401aca8 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -31,8 +31,7 @@
* - The third level is the libc call definition. It exposes the lower raw
* sys_<name>() calls in a way that looks like what a libc usually does,
* takes care of specific input values, and of setting errno upon error.
- * There can be minor variations compared to standard libc calls. For
- * example the open() call always takes 3 args here.
+ * There can be minor variations compared to standard libc calls.
*
* The errno variable is declared static and unused. This way it can be
* optimized away if not used. However this means that a program made of
@@ -105,6 +104,7 @@
#include "string.h"
#include "time.h"
#include "stackprotector.h"
+#include "dirent.h"
/* Used by programs to avoid std includes */
#define NOLIBC
diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
index 137552216e46..cdcc5904c51e 100644
--- a/tools/include/nolibc/signal.h
+++ b/tools/include/nolibc/signal.h
@@ -13,6 +13,7 @@
#include "sys.h"
/* This one is not marked static as it's needed by libgcc for divide by zero */
+int raise(int signal);
__attribute__((weak,unused,section(".text.nolibc_raise")))
int raise(int signal)
{
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
index 1d0d5259ec41..c71a2c257177 100644
--- a/tools/include/nolibc/stackprotector.h
+++ b/tools/include/nolibc/stackprotector.h
@@ -18,6 +18,7 @@
* triggering stack protector errors themselves
*/
+void __stack_chk_fail(void);
__attribute__((weak,used,noreturn,section(".text.nolibc_stack_chk")))
void __stack_chk_fail(void)
{
@@ -28,6 +29,7 @@ void __stack_chk_fail(void)
for (;;);
}
+void __stack_chk_fail_local(void);
__attribute__((weak,noreturn,section(".text.nolibc_stack_chk")))
void __stack_chk_fail_local(void)
{
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
index 3892034198dd..a403351dbf60 100644
--- a/tools/include/nolibc/stdio.h
+++ b/tools/include/nolibc/stdio.h
@@ -350,6 +350,104 @@ int printf(const char *fmt, ...)
}
static __attribute__((unused))
+int vsscanf(const char *str, const char *format, va_list args)
+{
+ uintmax_t uval;
+ intmax_t ival;
+ int base;
+ char *endptr;
+ int matches;
+ int lpref;
+
+ matches = 0;
+
+ while (1) {
+ if (*format == '%') {
+ /* start of pattern */
+ lpref = 0;
+ format++;
+
+ if (*format == 'l') {
+ /* same as in printf() */
+ lpref = 1;
+ format++;
+ if (*format == 'l') {
+ lpref = 2;
+ format++;
+ }
+ }
+
+ if (*format == '%') {
+ /* literal % */
+ if ('%' != *str)
+ goto done;
+ str++;
+ format++;
+ continue;
+ } else if (*format == 'd') {
+ ival = strtoll(str, &endptr, 10);
+ if (lpref == 0)
+ *va_arg(args, int *) = ival;
+ else if (lpref == 1)
+ *va_arg(args, long *) = ival;
+ else if (lpref == 2)
+ *va_arg(args, long long *) = ival;
+ } else if (*format == 'u' || *format == 'x' || *format == 'X') {
+ base = *format == 'u' ? 10 : 16;
+ uval = strtoull(str, &endptr, base);
+ if (lpref == 0)
+ *va_arg(args, unsigned int *) = uval;
+ else if (lpref == 1)
+ *va_arg(args, unsigned long *) = uval;
+ else if (lpref == 2)
+ *va_arg(args, unsigned long long *) = uval;
+ } else if (*format == 'p') {
+ *va_arg(args, void **) = (void *)strtoul(str, &endptr, 16);
+ } else {
+ SET_ERRNO(EILSEQ);
+ goto done;
+ }
+
+ format++;
+ str = endptr;
+ matches++;
+
+ } else if (*format == '\0') {
+ goto done;
+ } else if (isspace(*format)) {
+ /* skip spaces in format and str */
+ while (isspace(*format))
+ format++;
+ while (isspace(*str))
+ str++;
+ } else if (*format == *str) {
+ /* literal match */
+ format++;
+ str++;
+ } else {
+ if (!matches)
+ matches = EOF;
+ goto done;
+ }
+ }
+
+done:
+ return matches;
+}
+
+static __attribute__((unused, format(scanf, 2, 3)))
+int sscanf(const char *str, const char *format, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, format);
+ ret = vsscanf(str, format, args);
+ va_end(args);
+ return ret;
+}
+
+static __attribute__((unused))
void perror(const char *msg)
{
fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
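A hedged usage sketch (hypothetical, not from the commit) for the conversion subset vsscanf() implements above: %d, %u, %x/%X with optional l/ll length prefixes, %p, %%, literal matches, and whitespace skipping; field widths and %s are not supported.

#include <stdio.h>

static int parse_pair(const char *line)
{
	int id;
	unsigned long mask;

	/* e.g. line = "id=7 mask=1f"; sscanf() returns the number of conversions */
	if (sscanf(line, "id=%d mask=%lx", &id, &mask) != 2)
		return -1;
	return id;
}
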
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index 75aa273c23a6..86ad378ab1ea 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -30,6 +30,7 @@ static __attribute__((unused)) char itoa_buffer[21];
*/
/* must be exported, as it's used by libgcc for various divide functions */
+void abort(void);
__attribute__((weak,unused,noreturn,section(".text.nolibc_abort")))
void abort(void)
{
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
index 9ec9c24f38c0..ba84ab700e30 100644
--- a/tools/include/nolibc/string.h
+++ b/tools/include/nolibc/string.h
@@ -32,6 +32,7 @@ int memcmp(const void *s1, const void *s2, size_t n)
/* might be ignored by the compiler without -ffreestanding, then found as
* missing.
*/
+void *memmove(void *dst, const void *src, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memmove")))
void *memmove(void *dst, const void *src, size_t len)
{
@@ -56,6 +57,7 @@ void *memmove(void *dst, const void *src, size_t len)
#ifndef NOLIBC_ARCH_HAS_MEMCPY
/* must be exported, as it's used by libgcc on ARM */
+void *memcpy(void *dst, const void *src, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memcpy")))
void *memcpy(void *dst, const void *src, size_t len)
{
@@ -73,6 +75,7 @@ void *memcpy(void *dst, const void *src, size_t len)
/* might be ignored by the compiler without -ffreestanding, then found as
* missing.
*/
+void *memset(void *dst, int b, size_t len);
__attribute__((weak,unused,section(".text.nolibc_memset")))
void *memset(void *dst, int b, size_t len)
{
@@ -124,6 +127,7 @@ char *strcpy(char *dst, const char *src)
* thus itself, hence the asm() statement below that's meant to disable this
* confusing practice.
*/
+size_t strlen(const char *str);
__attribute__((weak,unused,section(".text.nolibc_strlen")))
size_t strlen(const char *str)
{
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
index d4a5c2399a66..08c1c074bec8 100644
--- a/tools/include/nolibc/sys.h
+++ b/tools/include/nolibc/sys.h
@@ -10,10 +10,10 @@
#include "std.h"
/* system includes */
-#include <asm/unistd.h>
-#include <asm/signal.h> /* for SIGCHLD */
-#include <asm/ioctls.h>
-#include <asm/mman.h>
+#include <linux/unistd.h>
+#include <linux/signal.h> /* for SIGCHLD */
+#include <linux/termios.h>
+#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/loop.h>
#include <linux/time.h>
@@ -23,7 +23,6 @@
#include <linux/prctl.h>
#include <linux/resource.h>
#include <linux/utsname.h>
-#include <linux/signal.h>
#include "arch.h"
#include "errno.h"
@@ -532,20 +531,16 @@ uid_t getuid(void)
/*
- * int ioctl(int fd, unsigned long req, void *value);
+ * int ioctl(int fd, unsigned long cmd, ... arg);
*/
static __attribute__((unused))
-int sys_ioctl(int fd, unsigned long req, void *value)
+long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- return my_syscall3(__NR_ioctl, fd, req, value);
+ return my_syscall3(__NR_ioctl, fd, cmd, arg);
}
-static __attribute__((unused))
-int ioctl(int fd, unsigned long req, void *value)
-{
- return __sysret(sys_ioctl(fd, req, value));
-}
+#define ioctl(fd, cmd, arg) __sysret(sys_ioctl(fd, cmd, (unsigned long)(arg)))
/*
* int kill(pid_t pid, int signal);
@@ -602,9 +597,36 @@ off_t sys_lseek(int fd, off_t offset, int whence)
}
static __attribute__((unused))
+int sys_llseek(int fd, unsigned long offset_high, unsigned long offset_low,
+ __kernel_loff_t *result, int whence)
+{
+#ifdef __NR_llseek
+ return my_syscall5(__NR_llseek, fd, offset_high, offset_low, result, whence);
+#else
+ return __nolibc_enosys(__func__, fd, offset_high, offset_low, result, whence);
+#endif
+}
+
+static __attribute__((unused))
off_t lseek(int fd, off_t offset, int whence)
{
- return __sysret(sys_lseek(fd, offset, whence));
+ __kernel_loff_t loff = 0;
+ off_t result;
+ int ret;
+
+ result = sys_lseek(fd, offset, whence);
+ if (result == -ENOSYS) {
+ /* Only exists on 32bit where nolibc off_t is also 32bit */
+ ret = sys_llseek(fd, 0, offset, &loff, whence);
+ if (ret < 0)
+ result = ret;
+ else if (loff != (off_t)loff)
+ result = -EOVERFLOW;
+ else
+ result = loff;
+ }
+
+ return __sysret(result);
}
@@ -742,6 +764,31 @@ int mount(const char *src, const char *tgt,
return __sysret(sys_mount(src, tgt, fst, flags, data));
}
+/*
+ * int openat(int dirfd, const char *path, int flags[, mode_t mode]);
+ */
+
+static __attribute__((unused))
+int sys_openat(int dirfd, const char *path, int flags, mode_t mode)
+{
+ return my_syscall4(__NR_openat, dirfd, path, flags, mode);
+}
+
+static __attribute__((unused))
+int openat(int dirfd, const char *path, int flags, ...)
+{
+ mode_t mode = 0;
+
+ if (flags & O_CREAT) {
+ va_list args;
+
+ va_start(args, flags);
+ mode = va_arg(args, mode_t);
+ va_end(args);
+ }
+
+ return __sysret(sys_openat(dirfd, path, flags, mode));
+}
/*
* int open(const char *path, int flags[, mode_t mode]);
@@ -750,13 +797,7 @@ int mount(const char *src, const char *tgt,
static __attribute__((unused))
int sys_open(const char *path, int flags, mode_t mode)
{
-#ifdef __NR_openat
return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode);
-#elif defined(__NR_open)
- return my_syscall3(__NR_open, path, flags, mode);
-#else
- return __nolibc_enosys(__func__, path, flags, mode);
-#endif
}
static __attribute__((unused))
@@ -768,7 +809,7 @@ int open(const char *path, int flags, ...)
va_list args;
va_start(args, flags);
- mode = va_arg(args, int);
+ mode = va_arg(args, mode_t);
va_end(args);
}
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index e16be0d37746..b8f629ef135f 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -33,7 +33,7 @@
* Missing asm support
*
* __BIT128() would not work in the asm code, as it shifts an
- * 'unsigned __init128' data type as direct representation of
+ * 'unsigned __int128' data type as direct representation of
* 128 bit constants is not supported in the gcc compiler, as
* they get silently truncated.
*
diff --git a/tools/include/uapi/linux/elf.h b/tools/include/uapi/linux/elf.h
new file mode 100644
index 000000000000..5834b83d7f9a
--- /dev/null
+++ b/tools/include/uapi/linux/elf.h
@@ -0,0 +1,524 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_ELF_H
+#define _LINUX_ELF_H
+
+#include <linux/types.h>
+#include <linux/elf-em.h>
+
+/* 32-bit ELF base types. */
+typedef __u32 Elf32_Addr;
+typedef __u16 Elf32_Half;
+typedef __u32 Elf32_Off;
+typedef __s32 Elf32_Sword;
+typedef __u32 Elf32_Word;
+typedef __u16 Elf32_Versym;
+
+/* 64-bit ELF base types. */
+typedef __u64 Elf64_Addr;
+typedef __u16 Elf64_Half;
+typedef __s16 Elf64_SHalf;
+typedef __u64 Elf64_Off;
+typedef __s32 Elf64_Sword;
+typedef __u32 Elf64_Word;
+typedef __u64 Elf64_Xword;
+typedef __s64 Elf64_Sxword;
+typedef __u16 Elf64_Versym;
+
+/* These constants are for the segment types stored in the image headers */
+#define PT_NULL 0
+#define PT_LOAD 1
+#define PT_DYNAMIC 2
+#define PT_INTERP 3
+#define PT_NOTE 4
+#define PT_SHLIB 5
+#define PT_PHDR 6
+#define PT_TLS 7 /* Thread local storage segment */
+#define PT_LOOS 0x60000000 /* OS-specific */
+#define PT_HIOS 0x6fffffff /* OS-specific */
+#define PT_LOPROC 0x70000000
+#define PT_HIPROC 0x7fffffff
+#define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550)
+#define PT_GNU_STACK (PT_LOOS + 0x474e551)
+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
+#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553)
+
+
+/* ARM MTE memory tag segment type */
+#define PT_AARCH64_MEMTAG_MTE (PT_LOPROC + 0x2)
+
+/*
+ * Extended Numbering
+ *
+ * If the real number of program header table entries is larger than
+ * or equal to PN_XNUM(0xffff), it is set to sh_info field of the
+ * section header at index 0, and PN_XNUM is set to e_phnum
+ * field. Otherwise, the section header at index 0 is zero
+ * initialized, if it exists.
+ *
+ * Specifications are available in:
+ *
+ * - Oracle: Linker and Libraries.
+ * Part No: 817–1984–19, August 2011.
+ * https://docs.oracle.com/cd/E18752_01/pdf/817-1984.pdf
+ *
+ * - System V ABI AMD64 Architecture Processor Supplement
+ * Draft Version 0.99.4,
+ * January 13, 2010.
+ * http://www.cs.washington.edu/education/courses/cse351/12wi/supp-docs/abi.pdf
+ */
+#define PN_XNUM 0xffff
+
+/* These constants define the different elf file types */
+#define ET_NONE 0
+#define ET_REL 1
+#define ET_EXEC 2
+#define ET_DYN 3
+#define ET_CORE 4
+#define ET_LOPROC 0xff00
+#define ET_HIPROC 0xffff
+
+/* This is the info that is needed to parse the dynamic section of the file */
+#define DT_NULL 0
+#define DT_NEEDED 1
+#define DT_PLTRELSZ 2
+#define DT_PLTGOT 3
+#define DT_HASH 4
+#define DT_STRTAB 5
+#define DT_SYMTAB 6
+#define DT_RELA 7
+#define DT_RELASZ 8
+#define DT_RELAENT 9
+#define DT_STRSZ 10
+#define DT_SYMENT 11
+#define DT_INIT 12
+#define DT_FINI 13
+#define DT_SONAME 14
+#define DT_RPATH 15
+#define DT_SYMBOLIC 16
+#define DT_REL 17
+#define DT_RELSZ 18
+#define DT_RELENT 19
+#define DT_PLTREL 20
+#define DT_DEBUG 21
+#define DT_TEXTREL 22
+#define DT_JMPREL 23
+#define DT_ENCODING 32
+#define OLD_DT_LOOS 0x60000000
+#define DT_LOOS 0x6000000d
+#define DT_HIOS 0x6ffff000
+#define DT_VALRNGLO 0x6ffffd00
+#define DT_VALRNGHI 0x6ffffdff
+#define DT_ADDRRNGLO 0x6ffffe00
+#define DT_GNU_HASH 0x6ffffef5
+#define DT_ADDRRNGHI 0x6ffffeff
+#define DT_VERSYM 0x6ffffff0
+#define DT_RELACOUNT 0x6ffffff9
+#define DT_RELCOUNT 0x6ffffffa
+#define DT_FLAGS_1 0x6ffffffb
+#define DT_VERDEF 0x6ffffffc
+#define DT_VERDEFNUM 0x6ffffffd
+#define DT_VERNEED 0x6ffffffe
+#define DT_VERNEEDNUM 0x6fffffff
+#define OLD_DT_HIOS 0x6fffffff
+#define DT_LOPROC 0x70000000
+#define DT_HIPROC 0x7fffffff
+
+/* This info is needed when parsing the symbol table */
+#define STB_LOCAL 0
+#define STB_GLOBAL 1
+#define STB_WEAK 2
+
+#define STN_UNDEF 0
+
+#define STT_NOTYPE 0
+#define STT_OBJECT 1
+#define STT_FUNC 2
+#define STT_SECTION 3
+#define STT_FILE 4
+#define STT_COMMON 5
+#define STT_TLS 6
+
+#define VER_FLG_BASE 0x1
+#define VER_FLG_WEAK 0x2
+
+#define ELF_ST_BIND(x) ((x) >> 4)
+#define ELF_ST_TYPE(x) ((x) & 0xf)
+#define ELF32_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF32_ST_TYPE(x) ELF_ST_TYPE(x)
+#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
+#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
+
+typedef struct {
+ Elf32_Sword d_tag;
+ union {
+ Elf32_Sword d_val;
+ Elf32_Addr d_ptr;
+ } d_un;
+} Elf32_Dyn;
+
+typedef struct {
+ Elf64_Sxword d_tag; /* entry tag value */
+ union {
+ Elf64_Xword d_val;
+ Elf64_Addr d_ptr;
+ } d_un;
+} Elf64_Dyn;
+
+/* The following are used with relocations */
+#define ELF32_R_SYM(x) ((x) >> 8)
+#define ELF32_R_TYPE(x) ((x) & 0xff)
+
+#define ELF64_R_SYM(i) ((i) >> 32)
+#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
+
+typedef struct elf32_rel {
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+} Elf32_Rel;
+
+typedef struct elf64_rel {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+} Elf64_Rel;
+
+typedef struct elf32_rela {
+ Elf32_Addr r_offset;
+ Elf32_Word r_info;
+ Elf32_Sword r_addend;
+} Elf32_Rela;
+
+typedef struct elf64_rela {
+ Elf64_Addr r_offset; /* Location at which to apply the action */
+ Elf64_Xword r_info; /* index and type of relocation */
+ Elf64_Sxword r_addend; /* Constant addend used to compute value */
+} Elf64_Rela;
+
+typedef struct elf32_sym {
+ Elf32_Word st_name;
+ Elf32_Addr st_value;
+ Elf32_Word st_size;
+ unsigned char st_info;
+ unsigned char st_other;
+ Elf32_Half st_shndx;
+} Elf32_Sym;
+
+typedef struct elf64_sym {
+ Elf64_Word st_name; /* Symbol name, index in string tbl */
+ unsigned char st_info; /* Type and binding attributes */
+ unsigned char st_other; /* No defined meaning, 0 */
+ Elf64_Half st_shndx; /* Associated section index */
+ Elf64_Addr st_value; /* Value of the symbol */
+ Elf64_Xword st_size; /* Associated symbol size */
+} Elf64_Sym;
+
+
+#define EI_NIDENT 16
+
+typedef struct elf32_hdr {
+ unsigned char e_ident[EI_NIDENT];
+ Elf32_Half e_type;
+ Elf32_Half e_machine;
+ Elf32_Word e_version;
+ Elf32_Addr e_entry; /* Entry point */
+ Elf32_Off e_phoff;
+ Elf32_Off e_shoff;
+ Elf32_Word e_flags;
+ Elf32_Half e_ehsize;
+ Elf32_Half e_phentsize;
+ Elf32_Half e_phnum;
+ Elf32_Half e_shentsize;
+ Elf32_Half e_shnum;
+ Elf32_Half e_shstrndx;
+} Elf32_Ehdr;
+
+typedef struct elf64_hdr {
+ unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry; /* Entry point virtual address */
+ Elf64_Off e_phoff; /* Program header table file offset */
+ Elf64_Off e_shoff; /* Section header table file offset */
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+} Elf64_Ehdr;
+
+/* These constants define the permissions on sections in the program
+ header, p_flags. */
+#define PF_R 0x4
+#define PF_W 0x2
+#define PF_X 0x1
+
+typedef struct elf32_phdr {
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+ Elf32_Addr p_vaddr;
+ Elf32_Addr p_paddr;
+ Elf32_Word p_filesz;
+ Elf32_Word p_memsz;
+ Elf32_Word p_flags;
+ Elf32_Word p_align;
+} Elf32_Phdr;
+
+typedef struct elf64_phdr {
+ Elf64_Word p_type;
+ Elf64_Word p_flags;
+ Elf64_Off p_offset; /* Segment file offset */
+ Elf64_Addr p_vaddr; /* Segment virtual address */
+ Elf64_Addr p_paddr; /* Segment physical address */
+ Elf64_Xword p_filesz; /* Segment size in file */
+ Elf64_Xword p_memsz; /* Segment size in memory */
+ Elf64_Xword p_align; /* Segment alignment, file & memory */
+} Elf64_Phdr;
+
+/* sh_type */
+#define SHT_NULL 0
+#define SHT_PROGBITS 1
+#define SHT_SYMTAB 2
+#define SHT_STRTAB 3
+#define SHT_RELA 4
+#define SHT_HASH 5
+#define SHT_DYNAMIC 6
+#define SHT_NOTE 7
+#define SHT_NOBITS 8
+#define SHT_REL 9
+#define SHT_SHLIB 10
+#define SHT_DYNSYM 11
+#define SHT_NUM 12
+#define SHT_LOPROC 0x70000000
+#define SHT_HIPROC 0x7fffffff
+#define SHT_LOUSER 0x80000000
+#define SHT_HIUSER 0xffffffff
+
+/* sh_flags */
+#define SHF_WRITE 0x1
+#define SHF_ALLOC 0x2
+#define SHF_EXECINSTR 0x4
+#define SHF_RELA_LIVEPATCH 0x00100000
+#define SHF_RO_AFTER_INIT 0x00200000
+#define SHF_MASKPROC 0xf0000000
+
+/* special section indexes */
+#define SHN_UNDEF 0
+#define SHN_LORESERVE 0xff00
+#define SHN_LOPROC 0xff00
+#define SHN_HIPROC 0xff1f
+#define SHN_LIVEPATCH 0xff20
+#define SHN_ABS 0xfff1
+#define SHN_COMMON 0xfff2
+#define SHN_HIRESERVE 0xffff
+
+typedef struct elf32_shdr {
+ Elf32_Word sh_name;
+ Elf32_Word sh_type;
+ Elf32_Word sh_flags;
+ Elf32_Addr sh_addr;
+ Elf32_Off sh_offset;
+ Elf32_Word sh_size;
+ Elf32_Word sh_link;
+ Elf32_Word sh_info;
+ Elf32_Word sh_addralign;
+ Elf32_Word sh_entsize;
+} Elf32_Shdr;
+
+typedef struct elf64_shdr {
+ Elf64_Word sh_name; /* Section name, index in string tbl */
+ Elf64_Word sh_type; /* Type of section */
+ Elf64_Xword sh_flags; /* Miscellaneous section attributes */
+ Elf64_Addr sh_addr; /* Section virtual addr at execution */
+ Elf64_Off sh_offset; /* Section file offset */
+ Elf64_Xword sh_size; /* Size of section in bytes */
+ Elf64_Word sh_link; /* Index of another section */
+ Elf64_Word sh_info; /* Additional section information */
+ Elf64_Xword sh_addralign; /* Section alignment */
+ Elf64_Xword sh_entsize; /* Entry size if section holds table */
+} Elf64_Shdr;
+
+#define EI_MAG0 0 /* e_ident[] indexes */
+#define EI_MAG1 1
+#define EI_MAG2 2
+#define EI_MAG3 3
+#define EI_CLASS 4
+#define EI_DATA 5
+#define EI_VERSION 6
+#define EI_OSABI 7
+#define EI_PAD 8
+
+#define ELFMAG0 0x7f /* EI_MAG */
+#define ELFMAG1 'E'
+#define ELFMAG2 'L'
+#define ELFMAG3 'F'
+#define ELFMAG "\177ELF"
+#define SELFMAG 4
+
+#define ELFCLASSNONE 0 /* EI_CLASS */
+#define ELFCLASS32 1
+#define ELFCLASS64 2
+#define ELFCLASSNUM 3
+
+#define ELFDATANONE 0 /* e_ident[EI_DATA] */
+#define ELFDATA2LSB 1
+#define ELFDATA2MSB 2
+
+#define EV_NONE 0 /* e_version, EI_VERSION */
+#define EV_CURRENT 1
+#define EV_NUM 2
+
+#define ELFOSABI_NONE 0
+#define ELFOSABI_LINUX 3
+
+#ifndef ELF_OSABI
+#define ELF_OSABI ELFOSABI_NONE
+#endif
+
+/*
+ * Notes used in ET_CORE. Architectures export some of the arch register sets
+ * using the corresponding note types via the PTRACE_GETREGSET and
+ * PTRACE_SETREGSET requests.
+ * The note name for these types is "LINUX", except NT_PRFPREG that is named
+ * "CORE".
+ */
+#define NT_PRSTATUS 1
+#define NT_PRFPREG 2
+#define NT_PRPSINFO 3
+#define NT_TASKSTRUCT 4
+#define NT_AUXV 6
+/*
+ * Note to userspace developers: size of NT_SIGINFO note may increase
+ * in the future to accommodate more fields, don't assume it is fixed!
+ */
+#define NT_SIGINFO 0x53494749
+#define NT_FILE 0x46494c45
+#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
+#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
+#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
+#define NT_PPC_TAR 0x103 /* Target Address Register */
+#define NT_PPC_PPR 0x104 /* Program Priority Register */
+#define NT_PPC_DSCR 0x105 /* Data Stream Control Register */
+#define NT_PPC_EBB 0x106 /* Event Based Branch Registers */
+#define NT_PPC_PMU 0x107 /* Performance Monitor Registers */
+#define NT_PPC_TM_CGPR 0x108 /* TM checkpointed GPR Registers */
+#define NT_PPC_TM_CFPR 0x109 /* TM checkpointed FPR Registers */
+#define NT_PPC_TM_CVMX 0x10a /* TM checkpointed VMX Registers */
+#define NT_PPC_TM_CVSX 0x10b /* TM checkpointed VSX Registers */
+#define NT_PPC_TM_SPR 0x10c /* TM Special Purpose Registers */
+#define NT_PPC_TM_CTAR 0x10d /* TM checkpointed Target Address Register */
+#define NT_PPC_TM_CPPR 0x10e /* TM checkpointed Program Priority Register */
+#define NT_PPC_TM_CDSCR 0x10f /* TM checkpointed Data Stream Control Register */
+#define NT_PPC_PKEY 0x110 /* Memory Protection Keys registers */
+#define NT_PPC_DEXCR 0x111 /* PowerPC DEXCR registers */
+#define NT_PPC_HASHKEYR 0x112 /* PowerPC HASHKEYR register */
+#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
+#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
+#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
+/* Old binutils treats 0x203 as a CET state */
+#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
+#define NT_X86_XSAVE_LAYOUT 0x205 /* XSAVE layout description */
+#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
+#define NT_S390_TIMER 0x301 /* s390 timer register */
+#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
+#define NT_S390_TODPREG 0x303 /* s390 TOD programmable register */
+#define NT_S390_CTRS 0x304 /* s390 control registers */
+#define NT_S390_PREFIX 0x305 /* s390 prefix register */
+#define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */
+#define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */
+#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
+#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
+#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
+#define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */
+#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */
+#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
+#define NT_ARM_TLS 0x401 /* ARM TLS register */
+#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
+#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
+#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
+#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
+#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
+#define NT_ARM_PACA_KEYS 0x407 /* ARM pointer authentication address keys */
+#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
+#define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */
+#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */
+#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
+#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
+#define NT_ARM_ZT 0x40d /* ARM SME ZT registers */
+#define NT_ARM_FPMR 0x40e /* ARM floating point mode register */
+#define NT_ARM_POE 0x40f /* ARM POE registers */
+#define NT_ARM_GCS 0x410 /* ARM GCS state */
+#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
+#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
+#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */
+#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
+#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
+#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
+#define NT_LOONGARCH_LASX 0xa03 /* LoongArch Loongson Advanced SIMD Extension registers */
+#define NT_LOONGARCH_LBT 0xa04 /* LoongArch Loongson Binary Translation registers */
+#define NT_LOONGARCH_HW_BREAK 0xa05 /* LoongArch hardware breakpoint registers */
+#define NT_LOONGARCH_HW_WATCH 0xa06 /* LoongArch hardware watchpoint registers */
+
+/* Note types with note name "GNU" */
+#define NT_GNU_PROPERTY_TYPE_0 5
+
+/* Note header in a PT_NOTE section */
+typedef struct elf32_note {
+ Elf32_Word n_namesz; /* Name size */
+ Elf32_Word n_descsz; /* Content size */
+ Elf32_Word n_type; /* Content type */
+} Elf32_Nhdr;
+
+/* Note header in a PT_NOTE section */
+typedef struct elf64_note {
+ Elf64_Word n_namesz; /* Name size */
+ Elf64_Word n_descsz; /* Content size */
+ Elf64_Word n_type; /* Content type */
+} Elf64_Nhdr;
+
+/* .note.gnu.property types for EM_AARCH64: */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+
+/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_AND */
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0)
+
+typedef struct {
+ Elf32_Half vd_version;
+ Elf32_Half vd_flags;
+ Elf32_Half vd_ndx;
+ Elf32_Half vd_cnt;
+ Elf32_Word vd_hash;
+ Elf32_Word vd_aux;
+ Elf32_Word vd_next;
+} Elf32_Verdef;
+
+typedef struct {
+ Elf64_Half vd_version;
+ Elf64_Half vd_flags;
+ Elf64_Half vd_ndx;
+ Elf64_Half vd_cnt;
+ Elf64_Word vd_hash;
+ Elf64_Word vd_aux;
+ Elf64_Word vd_next;
+} Elf64_Verdef;
+
+typedef struct {
+ Elf32_Word vda_name;
+ Elf32_Word vda_next;
+} Elf32_Verdaux;
+
+typedef struct {
+ Elf64_Word vda_name;
+ Elf64_Word vda_next;
+} Elf64_Verdaux;
+
+#endif /* _LINUX_ELF_H */
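
The note headers above are followed by the name and descriptor payloads,
conventionally padded to 4-byte alignment on Linux.  A minimal userspace
sketch of walking a PT_NOTE buffer (walk_notes() and its parameters are
illustrative, not part of this header; bounds checks on the name/desc
payloads are omitted for brevity):

	#include <elf.h>
	#include <stdio.h>

	#define NOTE_ALIGN(x)	(((x) + 3U) & ~3U)	/* name/desc pad to 4 */

	static void walk_notes(const unsigned char *buf, size_t len)
	{
		size_t off = 0;

		while (off + sizeof(Elf64_Nhdr) <= len) {
			const Elf64_Nhdr *n = (const Elf64_Nhdr *)(buf + off);
			const char *name = (const char *)(n + 1);

			printf("type=0x%x name=%.*s descsz=%u\n",
			       n->n_type, (int)n->n_namesz, name, n->n_descsz);

			off += sizeof(*n) + NOTE_ALIGN(n->n_namesz) +
			       NOTE_ALIGN(n->n_descsz);
		}
	}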
diff --git a/tools/memory-model/Documentation/glossary.txt b/tools/memory-model/Documentation/glossary.txt
index 6f3d16dbf467..7ead94bffa4e 100644
--- a/tools/memory-model/Documentation/glossary.txt
+++ b/tools/memory-model/Documentation/glossary.txt
@@ -15,14 +15,14 @@ Address Dependency: When the address of a later memory access is computed
3 do_something(p->a);
4 rcu_read_unlock();
- In this case, because the address of "p->a" on line 3 is computed
- from the value returned by the rcu_dereference() on line 2, the
- address dependency extends from that rcu_dereference() to that
- "p->a". In rare cases, optimizing compilers can destroy address
- dependencies. Please see Documentation/RCU/rcu_dereference.rst
- for more information.
+ In this case, because the address of "p->a" on line 3 is computed
+ from the value returned by the rcu_dereference() on line 2, the
+ address dependency extends from that rcu_dereference() to that
+ "p->a". In rare cases, optimizing compilers can destroy address
+ dependencies. Please see Documentation/RCU/rcu_dereference.rst
+ for more information.
- See also "Control Dependency" and "Data Dependency".
+ See also "Control Dependency" and "Data Dependency".
Acquire: With respect to a lock, acquiring that lock, for example,
using spin_lock(). With respect to a non-lock shared variable,
@@ -59,12 +59,12 @@ Control Dependency: When a later store's execution depends on a test
1 if (READ_ONCE(x))
2 WRITE_ONCE(y, 1);
- Here, the control dependency extends from the READ_ONCE() on
- line 1 to the WRITE_ONCE() on line 2. Control dependencies are
- fragile, and can be easily destroyed by optimizing compilers.
- Please see control-dependencies.txt for more information.
+ Here, the control dependency extends from the READ_ONCE() on
+ line 1 to the WRITE_ONCE() on line 2. Control dependencies are
+ fragile, and can be easily destroyed by optimizing compilers.
+ Please see control-dependencies.txt for more information.
- See also "Address Dependency" and "Data Dependency".
+ See also "Address Dependency" and "Data Dependency".
Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
name suggests. And in a great many cases, a pair of CPUs is all
@@ -72,10 +72,10 @@ Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
extended to additional CPUs, and the result is called a "cycle".
In a cycle, each CPU's ordering interacts with that of the next:
- CPU 0 CPU 1 CPU 2
- WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
- smp_mb(); smp_mb(); smp_mb();
- r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
+ CPU 0 CPU 1 CPU 2
+ WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
+ smp_mb(); smp_mb(); smp_mb();
+ r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
CPU 0's smp_mb() interacts with that of CPU 1, which interacts
with that of CPU 2, which in turn interacts with that of CPU 0
diff --git a/tools/memory-model/Documentation/herd-representation.txt b/tools/memory-model/Documentation/herd-representation.txt
index ed988906f2b7..4e19b4f2a476 100644
--- a/tools/memory-model/Documentation/herd-representation.txt
+++ b/tools/memory-model/Documentation/herd-representation.txt
@@ -18,6 +18,11 @@
#
# By convention, a blank line in a cell means "same as the preceding line".
#
+# Note that the syntactic representation does not always match the sets and
+# relations in linux-kernel.cat, due to redefinitions in linux-kernel.bell and
+# lock.cat. For example, the po link between LKR and LKW is upgraded to an rmw
+# link, and W[ACQUIRE] events are not included in the Acquire set.
+#
# Disclaimer. The table includes representations of "add" and "and" operations;
# corresponding/identical representations of "sub", "inc", "dec" and "or", "xor",
# "andnot" operations are omitted.
@@ -27,16 +32,16 @@
------------------------------------------------------------------------------
| Non-RMW ops | |
------------------------------------------------------------------------------
- | READ_ONCE | R[once] |
+ | READ_ONCE | R[ONCE] |
| atomic_read | |
- | WRITE_ONCE | W[once] |
+ | WRITE_ONCE | W[ONCE] |
| atomic_set | |
- | smp_load_acquire | R[acquire] |
+ | smp_load_acquire | R[ACQUIRE] |
| atomic_read_acquire | |
- | smp_store_release | W[release] |
+ | smp_store_release | W[RELEASE] |
| atomic_set_release | |
- | smp_store_mb | W[once] ->po F[mb] |
- | smp_mb | F[mb] |
+ | smp_store_mb | W[ONCE] ->po F[MB] |
+ | smp_mb | F[MB] |
| smp_rmb | F[rmb] |
| smp_wmb | F[wmb] |
| smp_mb__before_atomic | F[before-atomic] |
@@ -49,8 +54,8 @@
| rcu_read_lock | F[rcu-lock] |
| rcu_read_unlock | F[rcu-unlock] |
| synchronize_rcu | F[sync-rcu] |
- | rcu_dereference | R[once] |
- | rcu_assign_pointer | W[release] |
+ | rcu_dereference | R[ONCE] |
+ | rcu_assign_pointer | W[RELEASE] |
| srcu_read_lock | R[srcu-lock] |
| srcu_down_read | |
| srcu_read_unlock | W[srcu-unlock] |
@@ -60,32 +65,31 @@
------------------------------------------------------------------------------
| RMW ops w/o return value | |
------------------------------------------------------------------------------
- | atomic_add | R*[noreturn] ->rmw W*[once] |
+ | atomic_add | R*[NORETURN] ->rmw W*[NORETURN] |
| atomic_and | |
| spin_lock | LKR ->po LKW |
------------------------------------------------------------------------------
| RMW ops w/ return value | |
------------------------------------------------------------------------------
- | atomic_add_return | F[mb] ->po R*[once] |
- | | ->rmw W*[once] ->po F[mb] |
+ | atomic_add_return | R*[MB] ->rmw W*[MB] |
| atomic_fetch_add | |
| atomic_fetch_and | |
| atomic_xchg | |
| xchg | |
| atomic_add_negative | |
- | atomic_add_return_relaxed | R*[once] ->rmw W*[once] |
+ | atomic_add_return_relaxed | R*[ONCE] ->rmw W*[ONCE] |
| atomic_fetch_add_relaxed | |
| atomic_fetch_and_relaxed | |
| atomic_xchg_relaxed | |
| xchg_relaxed | |
| atomic_add_negative_relaxed | |
- | atomic_add_return_acquire | R*[acquire] ->rmw W*[once] |
+ | atomic_add_return_acquire | R*[ACQUIRE] ->rmw W*[ACQUIRE] |
| atomic_fetch_add_acquire | |
| atomic_fetch_and_acquire | |
| atomic_xchg_acquire | |
| xchg_acquire | |
| atomic_add_negative_acquire | |
- | atomic_add_return_release | R*[once] ->rmw W*[release] |
+ | atomic_add_return_release | R*[RELEASE] ->rmw W*[RELEASE] |
| atomic_fetch_add_release | |
| atomic_fetch_and_release | |
| atomic_xchg_release | |
@@ -94,17 +98,16 @@
------------------------------------------------------------------------------
| Conditional RMW ops | |
------------------------------------------------------------------------------
- | atomic_cmpxchg | On success: F[mb] ->po R*[once] |
- | | ->rmw W*[once] ->po F[mb] |
- | | On failure: R*[once] |
+ | atomic_cmpxchg | On success: R*[MB] ->rmw W*[MB] |
+ | | On failure: R*[MB] |
| cmpxchg | |
| atomic_add_unless | |
- | atomic_cmpxchg_relaxed | On success: R*[once] ->rmw W*[once] |
- | | On failure: R*[once] |
- | atomic_cmpxchg_acquire | On success: R*[acquire] ->rmw W*[once] |
- | | On failure: R*[once] |
- | atomic_cmpxchg_release | On success: R*[once] ->rmw W*[release] |
- | | On failure: R*[once] |
+ | atomic_cmpxchg_relaxed | On success: R*[ONCE] ->rmw W*[ONCE] |
+ | | On failure: R*[ONCE] |
+ | atomic_cmpxchg_acquire | On success: R*[ACQUIRE] ->rmw W*[ACQUIRE] |
+ | | On failure: R*[ACQUIRE] |
+ | atomic_cmpxchg_release | On success: R*[RELEASE] ->rmw W*[RELEASE] |
+ | | On failure: R*[RELEASE] |
| spin_trylock | On success: LKR ->po LKW |
| | On failure: LF |
------------------------------------------------------------------------------
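
A consequence of the failure rows worth spelling out: the tag on a failed
RMW is syntactic only.  linux-kernel.bell (below) removes failed RMWs from
the semantic Mb/Acquire/Release sets, so for example a failed cmpxchg()
provides no ordering at all (hedged sketch; x and y are arbitrary shared
variables):

	r0 = cmpxchg(&x, 1, 2);	/* if x != 1, the RMW fails ...      */
	r1 = READ_ONCE(y);	/* ... and this read is not ordered
				   after the failed access           */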
diff --git a/tools/memory-model/README b/tools/memory-model/README
index dab38904206a..64c860863aa9 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -20,7 +20,7 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
-Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
+Version 7.58 or higher of the "herd7" and "klitmus7" tools must be
downloaded separately:
https://github.com/herd/herdtools7
@@ -79,7 +79,7 @@ Several thousand more example litmus tests are available here:
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd
https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus
-Documentation describing litmus tests and now to use them may be found
+Documentation describing litmus tests and how to use them may be found
here:
tools/memory-model/Documentation/litmus-tests.txt
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index ce068700939c..fe65998002b9 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -13,17 +13,18 @@
"Linux-kernel memory consistency model"
-enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
- 'release (*smp_store_release*) ||
- 'acquire (*smp_load_acquire*) ||
- 'noreturn (* R of non-return RMW *)
-instructions R[{'once,'acquire,'noreturn}]
-instructions W[{'once,'release}]
-instructions RMW[{'once,'acquire,'release}]
+enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
+ 'RELEASE (*smp_store_release*) ||
+ 'ACQUIRE (*smp_load_acquire*) ||
+ 'NORETURN (* R of non-return RMW *) ||
+ 'MB (*xchg(),cmpxchg(),...*)
+instructions R[Accesses]
+instructions W[Accesses]
+instructions RMW[Accesses]
enum Barriers = 'wmb (*smp_wmb*) ||
'rmb (*smp_rmb*) ||
- 'mb (*smp_mb*) ||
+ 'MB (*smp_mb*) ||
'barrier (*barrier*) ||
'rcu-lock (*rcu_read_lock*) ||
'rcu-unlock (*rcu_read_unlock*) ||
@@ -35,6 +36,17 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
+
+(*
+ * Filter out syntactic annotations that do not provide the corresponding
+ * semantic ordering, such as Acquire on a store or Mb on a failed RMW.
+ *)
+let FailedRMW = RMW \ (domain(rmw) | range(rmw))
+let Acquire = ACQUIRE \ W \ FailedRMW
+let Release = RELEASE \ R \ FailedRMW
+let Mb = MB \ FailedRMW
+let Noreturn = NORETURN \ W
+
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
instructions SRCU[SRCU]
@@ -73,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
-let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
+let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW |
LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
@@ -82,3 +94,6 @@ let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data
+
+flag ~empty (if "lkmmv2" then 0 else _)
+ as this-model-requires-variant-higher-than-lkmmv1
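
Note the interplay with the .cfg change below: the final flag fires as
this-model-requires-variant-higher-than-lkmmv1 for every test unless herd7
runs with the lkmmv2 variant, and linux-kernel.cfg now passes "variant
lkmmv2" so invocations going through the standard configuration pick it up
automatically.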
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index adf3c4f41229..d7e7bf13c831 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -34,6 +34,16 @@ let R4rmb = R \ Noreturn (* Reads for which rmb works *)
let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
+ (*
+ * full-barrier RMWs (successful cmpxchg(), xchg(), etc.) act as
+	 * though they were enclosed by smp_mb().
+	 * The effect of these virtual smp_mb() barriers is formalized by
+	 * adding Mb tags to the read and write of the operation, and by
+	 * providing the same ordering as though there were additional po
+	 * edges between the Mb tag and the read and write, respectively.
+ *)
+ ([M] ; po ; [Mb & R]) |
+ ([Mb & W] ; po ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
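
Concretely, the two new clauses give a successful full-barrier RMW the
ordering it would have if written as below (sketch; x, y and s are
arbitrary shared variables):

	WRITE_ONCE(x, 1);
	smp_mb();		/* provided by ([M] ; po ; [Mb & R]) */
	old = xchg(&s, 1);
	smp_mb();		/* provided by ([Mb & W] ; po ; [M]) */
	r0 = READ_ONCE(y);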
diff --git a/tools/memory-model/linux-kernel.cfg b/tools/memory-model/linux-kernel.cfg
index 3c8098e99f41..69b04f3aad73 100644
--- a/tools/memory-model/linux-kernel.cfg
+++ b/tools/memory-model/linux-kernel.cfg
@@ -1,6 +1,7 @@
macros linux-kernel.def
bell linux-kernel.bell
model linux-kernel.cat
+variant lkmmv2
graph columns
squished true
showevents noregs
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index 88a39601f525..49e402782e49 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -6,18 +6,18 @@
// which appeared in ASPLOS 2018.
// ONCE
-READ_ONCE(X) __load{once}(X)
-WRITE_ONCE(X,V) { __store{once}(X,V); }
+READ_ONCE(X) __load{ONCE}(X)
+WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
// Release Acquire and friends
-smp_store_release(X,V) { __store{release}(*X,V); }
-smp_load_acquire(X) __load{acquire}(*X)
-rcu_assign_pointer(X,V) { __store{release}(X,V); }
-rcu_dereference(X) __load{once}(X)
-smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
+smp_store_release(X,V) { __store{RELEASE}(*X,V); }
+smp_load_acquire(X) __load{ACQUIRE}(*X)
+rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
+rcu_dereference(X) __load{ONCE}(X)
+smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
// Fences
-smp_mb() { __fence{mb}; }
+smp_mb() { __fence{MB}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
@@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
-xchg(X,V) __xchg{mb}(X,V)
-xchg_relaxed(X,V) __xchg{once}(X,V)
-xchg_release(X,V) __xchg{release}(X,V)
-xchg_acquire(X,V) __xchg{acquire}(X,V)
-cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
+xchg(X,V) __xchg{MB}(X,V)
+xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+xchg_release(X,V) __xchg{RELEASE}(X,V)
+xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
@@ -63,57 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
-atomic_add(V,X) { __atomic_op(X,+,V); }
-atomic_sub(V,X) { __atomic_op(X,-,V); }
-atomic_inc(X) { __atomic_op(X,+,1); }
-atomic_dec(X) { __atomic_op(X,-,1); }
-
-atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
-atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
-atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
-atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
-atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
-atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
-atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
-atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
-
-atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
-atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
-atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
-atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
-atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
-atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
-atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
-atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
-
-atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
-atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
-atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
-atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
-atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
-atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
-atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
-atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
-
-atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
-atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
-atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
-atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
-atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
-atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
-atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
-atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
-
-atomic_xchg(X,V) __xchg{mb}(X,V)
-atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
-atomic_xchg_release(X,V) __xchg{release}(X,V)
-atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
-atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
-atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
-atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
-atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
-
-atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
-atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
-atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
-atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
+atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
+atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
+atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
+atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
+atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
+atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
+atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
+atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
+
+atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
+atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
+atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
+atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
+atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
+atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
+atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
+atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
+
+atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
+atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
+atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
+atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
+
+atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
+atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
+atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
+atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
+
+atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
+atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
+atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
+atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
+
+atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
+atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
+atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
+atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
+atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
+atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
+atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
+atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
+
+atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
+atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
+atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
+atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
+atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
+atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
+atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
+atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
+
+atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
+atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
+atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
+atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
+atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
+atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
+atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
+atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
+
+atomic_xchg(X,V) __xchg{MB}(X,V)
+atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
+atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
+atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
+atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
+atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
+atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
+atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
+
+atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
+atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
+atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
+atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
+atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
+atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
+atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
+
+atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
+atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
+atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
+atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
+
+atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)
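
For instance, a litmus test's atomic_add_return_acquire(1, &v) now expands
to:

	__atomic_op_return{ACQUIRE}(&v,+,1)

i.e. an ACQUIRE-tagged read and an ACQUIRE-tagged write joined by rmw,
matching the herd-representation table above; linux-kernel.bell then drops
the acquire semantics from the write side, since Acquire excludes W.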
diff --git a/tools/objtool/Documentation/objtool.txt b/tools/objtool/Documentation/objtool.txt
index 7c3ee959b63c..28ac57b9e102 100644
--- a/tools/objtool/Documentation/objtool.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -28,6 +28,15 @@ Objtool has the following features:
sites, enabling the kernel to patch them inline, to prevent "thunk
funneling" for both security and performance reasons
+- Return thunk validation -- validates that return thunks are used for
+  certain CPU mitigations including Retbleed and SRSO
+
+- Return thunk annotation -- annotates all return thunk sites so the
+  kernel can patch them inline, depending on the enabled mitigations
+
+- Return thunk training validation -- validates that all entry paths
+  untrain a "safe return" before the first return (or call)
+
- Non-instrumentation validation -- validates non-instrumentable
("noinstr") code rules, preventing instrumentation in low-level C
entry code
@@ -53,6 +62,9 @@ Objtool has the following features:
- Function entry annotation -- annotates function entries, enabling
kernel function tracing
+- Function preamble (prefix) annotation and/or symbol generation -- used
+ for FineIBT and call depth tracking
+
- Other toolchain hacks which will go unmentioned at this time...
Each feature can be enabled individually or in combination using the
@@ -197,19 +209,17 @@ To achieve the validation, objtool enforces the following rules:
1. Each callable function must be annotated as such with the ELF
function type. In asm code, this is typically done using the
- ENTRY/ENDPROC macros. If objtool finds a return instruction
+ SYM_FUNC_{START,END} macros. If objtool finds a return instruction
outside of a function, it flags an error since that usually indicates
callable code which should be annotated accordingly.
This rule is needed so that objtool can properly identify each
callable function in order to analyze its stack metadata.
-2. Conversely, each section of code which is *not* callable should *not*
- be annotated as an ELF function. The ENDPROC macro shouldn't be used
- in this case.
-
- This rule is needed so that objtool can ignore non-callable code.
- Such code doesn't have to follow any of the other rules.
+2. Conversely, each section of code which is *not* callable, or is
+ otherwise doing funny things with the stack or registers, should
+ *not* be annotated as an ELF function. Rather, SYM_CODE_{START,END}
+ should be used along with unwind hints.
3. Each callable function which calls another function must have the
correct frame pointer logic, if required by CONFIG_FRAME_POINTER or
@@ -221,7 +231,7 @@ To achieve the validation, objtool enforces the following rules:
function B, the _caller_ of function A will be skipped on the stack
trace.
-4. Dynamic jumps and jumps to undefined symbols are only allowed if:
+4. Indirect jumps and jumps to undefined symbols are only allowed if:
a) the jump is part of a switch statement; or
@@ -304,29 +314,29 @@ the objtool maintainers.
001e 2823e: 80 ce 02 or $0x2,%dh
...
+
2. file.o: warning: objtool: .text+0x53: unreachable instruction
Objtool couldn't find a code path to reach the instruction.
If the error is for an asm file, and the instruction is inside (or
reachable from) a callable function, the function should be annotated
- with the ENTRY/ENDPROC macros (ENDPROC is the important one).
- Otherwise, the code should probably be annotated with the unwind hint
- macros in asm/unwind_hints.h so objtool and the unwinder can know the
- stack state associated with the code.
+ with the SYM_FUNC_START and SYM_FUNC_END macros.
- If you're 100% sure the code won't affect stack traces, or if you're
- a just a bad person, you can tell objtool to ignore it. See the
- "Adding exceptions" section below.
+ Otherwise, SYM_CODE_START can be used. In that case the code needs
+ to be annotated with unwind hint macros.
- If it's not actually in a callable function (e.g. kernel entry code),
- change ENDPROC to END.
+ If you're sure the code won't affect the reliability of runtime stack
+ traces and want objtool to ignore it, see "Adding exceptions" below.
-3. file.o: warning: objtool: foo+0x48c: bar() is missing a __noreturn annotation
- The call from foo() to bar() doesn't return, but bar() is missing the
- __noreturn annotation. NOTE: In addition to annotating the function
- with __noreturn, please also add it to tools/objtool/noreturns.h.
+3. file.o: warning: objtool: foo+0x48c: bar() missing __noreturn in .c/.h or NORETURN() in noreturns.h
+
+ The call from foo() to bar() doesn't return, but bar() is incorrectly
+ annotated. A noreturn function must be marked __noreturn in both its
+ declaration and its definition, and must have a NORETURN() annotation
+ in tools/objtool/noreturns.h.
+
4. file.o: warning: objtool: func(): can't find starting instruction
or
@@ -341,23 +351,21 @@ the objtool maintainers.
This is a kernel entry/exit instruction like sysenter or iret. Such
instructions aren't allowed in a callable function, and are most
- likely part of the kernel entry code. They should usually not have
- the callable function annotation (ENDPROC) and should always be
- annotated with the unwind hint macros in asm/unwind_hints.h.
+   likely part of the kernel entry code.  Such code should probably be
+   placed in a SYM_CODE_{START,END} block with unwind hints.
6. file.o: warning: objtool: func()+0x26: sibling call from callable instruction with modified stack frame
- This is a dynamic jump or a jump to an undefined symbol. Objtool
- assumed it's a sibling call and detected that the frame pointer
- wasn't first restored to its original state.
+ This is a branch to an UNDEF symbol. Objtool assumed it's a
+ sibling call and detected that the stack wasn't first restored to its
+ original state.
- If it's not really a sibling call, you may need to move the
- destination code to the local file.
+ If it's not really a sibling call, you may need to use unwind hints
+ and/or move the destination code to the local file.
If the instruction is not actually in a callable function (e.g.
- kernel entry code), change ENDPROC to END and annotate manually with
- the unwind hint macros in asm/unwind_hints.h.
+ kernel entry code), use SYM_CODE_{START,END} and unwind hints.
7. file: warning: objtool: func()+0x5c: stack state mismatch
@@ -373,8 +381,8 @@ the objtool maintainers.
Another possibility is that the code has some asm or inline asm which
does some unusual things to the stack or the frame pointer. In such
- cases it's probably appropriate to use the unwind hint macros in
- asm/unwind_hints.h.
+   cases it's probably appropriate to use SYM_CODE_{START,END} with
+   unwind hints.
8. file.o: warning: objtool: funcA() falls through to next function funcB()
@@ -384,17 +392,16 @@ the objtool maintainers.
can fall through into the next function. There could be different
reasons for this:
- 1) funcA()'s last instruction is a call to a "noreturn" function like
+ a) funcA()'s last instruction is a call to a "noreturn" function like
panic(). In this case the noreturn function needs to be added to
objtool's hard-coded global_noreturns array. Feel free to bug the
objtool maintainer, or you can submit a patch.
- 2) funcA() uses the unreachable() annotation in a section of code
+ b) funcA() uses the unreachable() annotation in a section of code
that is actually reachable.
- 3) If funcA() calls an inline function, the object code for funcA()
- might be corrupt due to a gcc bug. For more details, see:
- https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+ c) Some undefined behavior like divide by zero.
+
9. file.o: warning: objtool: funcA() call to funcB() with UACCESS enabled
@@ -432,24 +439,26 @@ the objtool maintainers.
This limitation can be overcome by massaging the alternatives with
NOPs to shift the stack changes around so they no longer conflict.
+
11. file.o: warning: unannotated intra-function call
- This warning means that a direct call is done to a destination which
- is not at the beginning of a function. If this is a legit call, you
- can remove this warning by putting the ANNOTATE_INTRA_FUNCTION_CALL
- directive right before the call.
+ This warning means that a direct call is done to a destination which
+ is not at the beginning of a function. If this is a legit call, you
+ can remove this warning by putting the ANNOTATE_INTRA_FUNCTION_CALL
+ directive right before the call.
+
12. file.o: warning: func(): not an indirect call target
- This means that objtool is running with --ibt and a function expected
- to be an indirect call target is not. In particular, this happens for
- init_module() or cleanup_module() if a module relies on these special
- names and does not use module_init() / module_exit() macros to create
- them.
+ This means that objtool is running with --ibt and a function
+ expected to be an indirect call target is not. In particular, this
+ happens for init_module() or cleanup_module() if a module relies on
+ these special names and does not use module_init() / module_exit()
+ macros to create them.
If the error doesn't seem to make sense, it could be a bug in objtool.
-Feel free to ask the objtool maintainer for help.
+Feel free to ask the objtool maintainers for help.
Adding exceptions
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 7a65948892e5..8c20361dd100 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -37,7 +37,7 @@ OBJTOOL_CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBE
OBJTOOL_LDFLAGS := $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
-elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - | grep elf_getshdr)
+elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - 2>/dev/null | grep elf_getshdr)
OBJTOOL_CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
# Always want host compilation.
diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c
index 69b66994f2a1..02e490555966 100644
--- a/tools/objtool/arch/loongarch/decode.c
+++ b/tools/objtool/arch/loongarch/decode.c
@@ -5,10 +5,7 @@
#include <asm/inst.h>
#include <asm/orc_types.h>
#include <linux/objtool_types.h>
-
-#ifndef EM_LOONGARCH
-#define EM_LOONGARCH 258
-#endif
+#include <arch/elf.h>
int arch_ftrace_match(char *name)
{
@@ -363,3 +360,26 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
state->cfa.base = CFI_SP;
state->cfa.offset = 0;
}
+
+unsigned int arch_reloc_size(struct reloc *reloc)
+{
+ switch (reloc_type(reloc)) {
+ case R_LARCH_32:
+ case R_LARCH_32_PCREL:
+ return 4;
+ default:
+ return 8;
+ }
+}
+
+unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
+{
+ switch (reloc_type(reloc)) {
+ case R_LARCH_32_PCREL:
+ case R_LARCH_64_PCREL:
+ return reloc->sym->offset + reloc_addend(reloc) -
+ (reloc_offset(reloc) - reloc_offset(table));
+ default:
+ return reloc->sym->offset + reloc_addend(reloc);
+ }
+}
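
A worked example of the PCREL case, with made-up offsets: let S be
reloc->sym->offset, A the addend, P the entry's rodata offset and T the
table's base offset.  Assuming the usual PIC lowering, where the CPU
computes table base plus entry, a PCREL table entry stores the target
relative to its own location, so:

	stored   = S + A - P		/* what the linker resolves  */
	returned = S + A - (P - T)	/* what the helper computes  */
	         = T + stored		/* base + entry == target    */

which is the branch target's offset within the symbol's section, exactly
the value add_jump_table() later passes to find_insn().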
diff --git a/tools/objtool/arch/loongarch/include/arch/elf.h b/tools/objtool/arch/loongarch/include/arch/elf.h
index 9623d663220e..ec79062c9554 100644
--- a/tools/objtool/arch/loongarch/include/arch/elf.h
+++ b/tools/objtool/arch/loongarch/include/arch/elf.h
@@ -18,6 +18,13 @@
#ifndef R_LARCH_32_PCREL
#define R_LARCH_32_PCREL 99
#endif
+#ifndef R_LARCH_64_PCREL
+#define R_LARCH_64_PCREL 109
+#endif
+
+#ifndef EM_LOONGARCH
+#define EM_LOONGARCH 258
+#endif
#define R_NONE R_LARCH_NONE
#define R_ABS32 R_LARCH_32
diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c
index 87230ed570fd..e39f86d97002 100644
--- a/tools/objtool/arch/loongarch/special.c
+++ b/tools/objtool/arch/loongarch/special.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <string.h>
#include <objtool/special.h>
+#include <objtool/warn.h>
bool arch_support_alt_relocation(struct special_alt *special_alt,
struct instruction *insn,
@@ -8,9 +10,164 @@ bool arch_support_alt_relocation(struct special_alt *special_alt,
return false;
}
+struct table_info {
+ struct list_head jump_info;
+ unsigned long insn_offset;
+ unsigned long rodata_offset;
+};
+
+static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
+ struct instruction *insn,
+ unsigned long *table_size)
+{
+ struct section *rsec;
+ struct reloc *reloc;
+ struct list_head table_list;
+ struct table_info *orig_table;
+ struct table_info *next_table;
+ unsigned long tmp_insn_offset;
+ unsigned long tmp_rodata_offset;
+
+ rsec = find_section_by_name(file->elf, ".rela.discard.tablejump_annotate");
+ if (!rsec)
+ return;
+
+ INIT_LIST_HEAD(&table_list);
+
+ for_each_reloc(rsec, reloc) {
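+		/* Annotations come in pairs: the jump insn reloc, then the table reloc. */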
+ orig_table = malloc(sizeof(struct table_info));
+ if (!orig_table) {
+ WARN("malloc failed");
+ return;
+ }
+
+ orig_table->insn_offset = reloc->sym->offset + reloc_addend(reloc);
+ reloc++;
+ orig_table->rodata_offset = reloc->sym->offset + reloc_addend(reloc);
+
+ list_add_tail(&orig_table->jump_info, &table_list);
+
+ if (reloc_idx(reloc) + 1 == sec_num_entries(rsec))
+ break;
+ }
+
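+	/* Sort the list by ascending rodata offset (simple exchange sort). */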
+ list_for_each_entry(orig_table, &table_list, jump_info) {
+ next_table = list_next_entry(orig_table, jump_info);
+ list_for_each_entry_from(next_table, &table_list, jump_info) {
+ if (next_table->rodata_offset < orig_table->rodata_offset) {
+ tmp_insn_offset = next_table->insn_offset;
+ tmp_rodata_offset = next_table->rodata_offset;
+ next_table->insn_offset = orig_table->insn_offset;
+ next_table->rodata_offset = orig_table->rodata_offset;
+ orig_table->insn_offset = tmp_insn_offset;
+ orig_table->rodata_offset = tmp_rodata_offset;
+ }
+ }
+ }
+
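+	/* A table's size is the gap from its rodata offset to the next distinct one. */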
+ list_for_each_entry(orig_table, &table_list, jump_info) {
+ if (insn->offset == orig_table->insn_offset) {
+ next_table = list_next_entry(orig_table, jump_info);
+ if (&next_table->jump_info == &table_list) {
+ *table_size = 0;
+ return;
+ }
+
+ while (next_table->rodata_offset == orig_table->rodata_offset) {
+ next_table = list_next_entry(next_table, jump_info);
+ if (&next_table->jump_info == &table_list) {
+ *table_size = 0;
+ return;
+ }
+ }
+
+ *table_size = next_table->rodata_offset - orig_table->rodata_offset;
+ }
+ }
+}
+
+static struct reloc *find_reloc_by_table_annotate(struct objtool_file *file,
+ struct instruction *insn,
+ unsigned long *table_size)
+{
+ struct section *rsec;
+ struct reloc *reloc;
+ unsigned long offset;
+
+ rsec = find_section_by_name(file->elf, ".rela.discard.tablejump_annotate");
+ if (!rsec)
+ return NULL;
+
+ for_each_reloc(rsec, reloc) {
+ if (reloc->sym->sec->rodata)
+ continue;
+
+ if (strcmp(insn->sec->name, reloc->sym->sec->name))
+ continue;
+
+ offset = reloc->sym->offset + reloc_addend(reloc);
+ if (insn->offset == offset) {
+ get_rodata_table_size_by_table_annotate(file, insn, table_size);
+ reloc++;
+ return reloc;
+ }
+ }
+
+ return NULL;
+}
+
+static struct reloc *find_reloc_of_rodata_c_jump_table(struct section *sec,
+ unsigned long offset,
+ unsigned long *table_size)
+{
+ struct section *rsec;
+ struct reloc *reloc;
+
+ rsec = sec->rsec;
+ if (!rsec)
+ return NULL;
+
+ for_each_reloc(rsec, reloc) {
+ if (reloc_offset(reloc) > offset)
+ break;
+
+ if (!strcmp(reloc->sym->sec->name, C_JUMP_TABLE_SECTION)) {
+ *table_size = 0;
+ return reloc;
+ }
+ }
+
+ return NULL;
+}
+
struct reloc *arch_find_switch_table(struct objtool_file *file,
struct instruction *insn,
unsigned long *table_size)
{
- return NULL;
+ struct reloc *annotate_reloc;
+ struct reloc *rodata_reloc;
+ struct section *table_sec;
+ unsigned long table_offset;
+
+ annotate_reloc = find_reloc_by_table_annotate(file, insn, table_size);
+ if (!annotate_reloc) {
+ annotate_reloc = find_reloc_of_rodata_c_jump_table(
+ insn->sec, insn->offset, table_size);
+ if (!annotate_reloc)
+ return NULL;
+ }
+
+ table_sec = annotate_reloc->sym->sec;
+ table_offset = annotate_reloc->sym->offset + reloc_addend(annotate_reloc);
+
+ /*
+ * Each table entry has a rela associated with it. The rela
+ * should reference text in the same function as the original
+ * instruction.
+ */
+ rodata_reloc = find_reloc_by_dest(file->elf, table_sec, table_offset);
+ if (!rodata_reloc)
+ return NULL;
+
+ return rodata_reloc;
}
diff --git a/tools/objtool/arch/powerpc/decode.c b/tools/objtool/arch/powerpc/decode.c
index 53b55690f320..7c0bf2429067 100644
--- a/tools/objtool/arch/powerpc/decode.c
+++ b/tools/objtool/arch/powerpc/decode.c
@@ -106,3 +106,17 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
state->regs[CFI_RA].base = CFI_CFA;
state->regs[CFI_RA].offset = 0;
}
+
+unsigned int arch_reloc_size(struct reloc *reloc)
+{
+ switch (reloc_type(reloc)) {
+ case R_PPC_REL32:
+ case R_PPC_ADDR32:
+ case R_PPC_UADDR32:
+ case R_PPC_PLT32:
+ case R_PPC_PLTREL32:
+ return 4;
+ default:
+ return 8;
+ }
+}
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index fe1362c34564..7567c893f45e 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -850,5 +850,19 @@ bool arch_is_rethunk(struct symbol *sym)
bool arch_is_embedded_insn(struct symbol *sym)
{
return !strcmp(sym->name, "retbleed_return_thunk") ||
+ !strcmp(sym->name, "srso_alias_safe_ret") ||
!strcmp(sym->name, "srso_safe_ret");
}
+
+unsigned int arch_reloc_size(struct reloc *reloc)
+{
+ switch (reloc_type(reloc)) {
+ case R_X86_64_32:
+ case R_X86_64_32S:
+ case R_X86_64_PC32:
+ case R_X86_64_PLT32:
+ return 4;
+ default:
+ return 8;
+ }
+}
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 387d56a7f5fb..5f761f420b8c 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -6,6 +6,10 @@
#include <subcmd/parse-options.h>
#include <string.h>
#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/sendfile.h>
#include <objtool/builtin.h>
#include <objtool/objtool.h>
@@ -14,6 +18,8 @@
"error: objtool: " format "\n", \
##__VA_ARGS__)
+const char *objname;
+
struct opts opts;
static const char * const check_usage[] = {
@@ -71,7 +77,7 @@ static const struct option check_options[] = {
OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"),
OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"),
OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
- OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
+ OPT_BOOLEAN(0, "orc", &opts.orc, "generate ORC metadata"),
OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
@@ -84,16 +90,17 @@ static const struct option check_options[] = {
OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump),
OPT_GROUP("Options:"),
- OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
- OPT_BOOLEAN(0, "backup", &opts.backup, "create .orig files before modification"),
- OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
- OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
- OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
- OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
- OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
- OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
- OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
+ OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
+ OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
+ OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
+ OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
+ OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
+ OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
+ OPT_STRING('o', "output", &opts.output, "file", "output file name"),
+ OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
+ OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
OPT_BOOLEAN('v', "verbose", &opts.verbose, "verbose warnings"),
+ OPT_BOOLEAN(0, "Werror", &opts.werror, "return error on warnings"),
OPT_END(),
};
@@ -131,6 +138,26 @@ int cmd_parse_options(int argc, const char **argv, const char * const usage[])
static bool opts_valid(void)
{
+ if (opts.mnop && !opts.mcount) {
+ ERROR("--mnop requires --mcount");
+ return false;
+ }
+
+ if (opts.noinstr && !opts.link) {
+ ERROR("--noinstr requires --link");
+ return false;
+ }
+
+ if (opts.ibt && !opts.link) {
+ ERROR("--ibt requires --link");
+ return false;
+ }
+
+ if (opts.unret && !opts.link) {
+ ERROR("--unret requires --link");
+ return false;
+ }
+
if (opts.hack_jump_label ||
opts.hack_noinstr ||
opts.ibt ||
@@ -151,11 +178,6 @@ static bool opts_valid(void)
return true;
}
- if (opts.unret && !opts.rethunk) {
- ERROR("--unret requires --rethunk");
- return false;
- }
-
if (opts.dump_orc)
return true;
@@ -163,76 +185,156 @@ static bool opts_valid(void)
return false;
}
-static bool mnop_opts_valid(void)
+static int copy_file(const char *src, const char *dst)
{
- if (opts.mnop && !opts.mcount) {
- ERROR("--mnop requires --mcount");
- return false;
+ size_t to_copy, copied;
+ int dst_fd, src_fd;
+ struct stat stat;
+ off_t offset = 0;
+
+ src_fd = open(src, O_RDONLY);
+ if (src_fd == -1) {
+ ERROR("can't open '%s' for reading", src);
+ return 1;
}
- return true;
-}
-
-static bool link_opts_valid(struct objtool_file *file)
-{
- if (opts.link)
- return true;
+ dst_fd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0400);
+ if (dst_fd == -1) {
+ ERROR("can't open '%s' for writing", dst);
+ return 1;
+ }
- if (has_multiple_files(file->elf)) {
- ERROR("Linked object detected, forcing --link");
- opts.link = true;
- return true;
+ if (fstat(src_fd, &stat) == -1) {
+ perror("fstat");
+ return 1;
}
- if (opts.noinstr) {
- ERROR("--noinstr requires --link");
- return false;
+ if (fchmod(dst_fd, stat.st_mode) == -1) {
+ perror("fchmod");
+ return 1;
}
- if (opts.ibt) {
- ERROR("--ibt requires --link");
- return false;
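+	/* sendfile() may copy less than requested; loop until all bytes moved. */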
+ for (to_copy = stat.st_size; to_copy > 0; to_copy -= copied) {
+ copied = sendfile(dst_fd, src_fd, &offset, to_copy);
+ if (copied == -1) {
+ perror("sendfile");
+ return 1;
+ }
}
- if (opts.unret) {
- ERROR("--unret requires --link");
- return false;
+ close(dst_fd);
+ close(src_fd);
+ return 0;
+}
+
+static char **save_argv(int argc, const char **argv)
+{
+ char **orig_argv;
+
+ orig_argv = calloc(argc, sizeof(char *));
+ if (!orig_argv) {
+ perror("calloc");
+ return NULL;
}
- return true;
+ for (int i = 0; i < argc; i++) {
+ orig_argv[i] = strdup(argv[i]);
+ if (!orig_argv[i]) {
+ perror("strdup");
+ return NULL;
+ }
+	}
+
+ return orig_argv;
}
+#define ORIG_SUFFIX ".orig"
+
int objtool_run(int argc, const char **argv)
{
- const char *objname;
struct objtool_file *file;
- int ret;
+ char *backup = NULL;
+ char **orig_argv;
+ int ret = 0;
- argc = cmd_parse_options(argc, argv, check_usage);
- objname = argv[0];
+ orig_argv = save_argv(argc, argv);
+ if (!orig_argv)
+ return 1;
+
+ cmd_parse_options(argc, argv, check_usage);
if (!opts_valid())
return 1;
+ objname = argv[0];
+
if (opts.dump_orc)
return orc_dump(objname);
+ if (!opts.dryrun && opts.output) {
+ /* copy original .o file to output file */
+ if (copy_file(objname, opts.output))
+ return 1;
+
+ /* from here on, work directly on the output file */
+ objname = opts.output;
+ }
+
file = objtool_open_read(objname);
if (!file)
- return 1;
+ goto err;
- if (!mnop_opts_valid())
- return 1;
-
- if (!link_opts_valid(file))
- return 1;
+ if (!opts.link && has_multiple_files(file->elf)) {
+ ERROR("Linked object requires --link");
+ goto err;
+ }
ret = check(file);
if (ret)
- return ret;
+ goto err;
- if (file->elf->changed)
- return elf_write(file->elf);
+ if (!opts.dryrun && file->elf->changed && elf_write(file->elf))
+ goto err;
return 0;
+
+err:
+ if (opts.dryrun)
+ goto err_msg;
+
+ if (opts.output) {
+ unlink(opts.output);
+ goto err_msg;
+ }
+
+ /*
+ * Make a backup before kbuild deletes the file so the error
+ * can be recreated without recompiling or relinking.
+ */
+ backup = malloc(strlen(objname) + strlen(ORIG_SUFFIX) + 1);
+ if (!backup) {
+ perror("malloc");
+ return 1;
+ }
+
+ strcpy(backup, objname);
+ strcat(backup, ORIG_SUFFIX);
+ if (copy_file(objname, backup))
+ return 1;
+
+err_msg:
+ fprintf(stderr, "%s", orig_argv[0]);
+
+ for (int i = 1; i < argc; i++) {
+ char *arg = orig_argv[i];
+
+ if (backup && !strcmp(arg, objname))
+ fprintf(stderr, " %s -o %s", backup, objname);
+ else
+ fprintf(stderr, " %s", arg);
+ }
+
+ fprintf(stderr, "\n");
+
+ return 1;
}
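
The error path above also echoes a command line that reproduces the
failure against the saved input.  For a hypothetical invocation "objtool
--orc vmlinux.o" that fails, the message would be roughly:

	objtool --orc vmlinux.o.orig -o vmlinux.o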
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ce973d9d8e6d..ca3435acc326 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1284,15 +1284,6 @@ static void annotate_call_site(struct objtool_file *file,
if (!sym)
sym = reloc->sym;
- /*
- * Alternative replacement code is just template code which is
- * sometimes copied to the original instruction. For now, don't
- * annotate it. (In the future we might consider annotating the
- * original instruction if/when it ever makes sense to do so.)
- */
- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
- return;
-
if (sym->static_call_tramp) {
list_add_tail(&insn->call_node, &file->static_call_list);
return;
@@ -1350,7 +1341,8 @@ static void annotate_call_site(struct objtool_file *file,
return;
}
- if (insn->type == INSN_CALL && !insn->sec->init)
+ if (insn->type == INSN_CALL && !insn->sec->init &&
+ !insn->_call_dest->embedded_insn)
list_add_tail(&insn->call_node, &file->call_list);
if (!sibling && dead_end_function(file, sym))
@@ -1944,6 +1936,11 @@ out:
return ret;
}
+__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
+{
+ return reloc->sym->offset + reloc_addend(reloc);
+}
+
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
struct reloc *next_table)
{
@@ -1954,6 +1951,7 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
unsigned int prev_offset = 0;
struct reloc *reloc = table;
struct alternative *alt;
+ unsigned long sym_offset;
/*
* Each @reloc is a switch table relocation which points to the target
@@ -1968,12 +1966,13 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
break;
/* Make sure the table entries are consecutive: */
- if (prev_offset && reloc_offset(reloc) != prev_offset + 8)
+ if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
break;
+ sym_offset = arch_jump_table_sym_offset(reloc, table);
+
/* Detect function pointers from contiguous objects: */
- if (reloc->sym->sec == pfunc->sec &&
- reloc_addend(reloc) == pfunc->offset)
+ if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
break;
/*
@@ -1981,10 +1980,10 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
* which point to the end of the function. Ignore them.
*/
if (reloc->sym->sec == pfunc->sec &&
- reloc_addend(reloc) == pfunc->offset + pfunc->len)
+ sym_offset == pfunc->offset + pfunc->len)
goto next;
- dest_insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc));
+ dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
if (!dest_insn)
break;
@@ -2023,6 +2022,7 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
struct reloc *table_reloc;
struct instruction *dest_insn, *orig_insn = insn;
unsigned long table_size;
+ unsigned long sym_offset;
/*
* Backward search using the @first_jump_src links, these help avoid
@@ -2046,7 +2046,10 @@ static void find_jump_table(struct objtool_file *file, struct symbol *func,
table_reloc = arch_find_switch_table(file, insn, &table_size);
if (!table_reloc)
continue;
- dest_insn = find_insn(file, table_reloc->sym->sec, reloc_addend(table_reloc));
+
+ sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);
+
+ dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
continue;
@@ -4449,35 +4452,6 @@ static int validate_sls(struct objtool_file *file)
return warnings;
}
-static bool ignore_noreturn_call(struct instruction *insn)
-{
- struct symbol *call_dest = insn_call_dest(insn);
-
- /*
- * FIXME: hack, we need a real noreturn solution
- *
- * Problem is, exc_double_fault() may or may not return, depending on
- * whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility
- * to the kernel config.
- *
- * Other potential ways to fix it:
- *
- * - have compiler communicate __noreturn functions somehow
- * - remove CONFIG_X86_ESPFIX64
- * - read the .config file
- * - add a cmdline option
- * - create a generic objtool annotation format (vs a bunch of custom
- * formats) and annotate it
- */
- if (!strcmp(call_dest->name, "exc_double_fault")) {
- /* prevent further unreachable warnings for the caller */
- insn->sym->warned = 1;
- return true;
- }
-
- return false;
-}
-
static int validate_reachable_instructions(struct objtool_file *file)
{
struct instruction *insn, *prev_insn;
@@ -4494,8 +4468,8 @@ static int validate_reachable_instructions(struct objtool_file *file)
prev_insn = prev_insn_same_sec(file, insn);
if (prev_insn && prev_insn->dead_end) {
call_dest = insn_call_dest(prev_insn);
- if (call_dest && !ignore_noreturn_call(prev_insn)) {
- WARN_INSN(insn, "%s() is missing a __noreturn annotation",
+ if (call_dest) {
+ WARN_INSN(insn, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
call_dest->name);
warnings++;
continue;
@@ -4565,7 +4539,7 @@ static int disas_warned_funcs(struct objtool_file *file)
char *funcs = NULL, *tmp;
for_each_sym(file, sym) {
- if (sym->warned) {
+ if (sym->warnings) {
if (!funcs) {
funcs = malloc(strlen(sym->name) + 1);
strcpy(funcs, sym->name);
@@ -4623,8 +4597,10 @@ int check(struct objtool_file *file)
init_cfi_state(&force_undefined_cfi);
force_undefined_cfi.force_undefined = true;
- if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
+ if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) {
+ ret = -1;
goto out;
+ }
cfi_hash_add(&init_cfi);
cfi_hash_add(&func_cfi);
@@ -4641,7 +4617,7 @@ int check(struct objtool_file *file)
if (opts.retpoline) {
ret = validate_retpoline(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
@@ -4677,7 +4653,7 @@ int check(struct objtool_file *file)
*/
ret = validate_unrets(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
@@ -4740,7 +4716,7 @@ int check(struct objtool_file *file)
if (opts.prefix) {
ret = add_prefix_symbols(file);
if (ret < 0)
- return ret;
+ goto out;
warnings += ret;
}
@@ -4772,9 +4748,18 @@ int check(struct objtool_file *file)
out:
/*
- * For now, don't fail the kernel build on fatal warnings. These
- * errors are still fairly common due to the growing matrix of
- * supported toolchains and their recent pace of change.
+ * CONFIG_OBJTOOL_WERROR upgrades all warnings (and errors) to actual
+ * errors.
+ *
+ * Note that even "fatal" type errors don't actually return an error
+	 * without CONFIG_OBJTOOL_WERROR.  That probably needs to be improved
+	 * at some point.
*/
+ if (opts.werror && (ret || warnings)) {
+ if (warnings)
+ WARN("%d warning(s) upgraded to errors", warnings);
+ return 1;
+ }
+
return 0;
}
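
With arch_reloc_size() in play, the "consecutive table entries" check
above generalizes from the old hard-coded 8 bytes to the entry width of
the architecture, e.g. 4-byte R_LARCH_32_PCREL entries on LoongArch:

	reloc_offset(reloc) == prev_offset + arch_reloc_size(reloc)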
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 6f64d611faea..be4f4b62730c 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -1302,9 +1302,6 @@ int elf_write(struct elf *elf)
struct section *sec;
Elf_Scn *s;
- if (opts.dryrun)
- return 0;
-
/* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
if (sec->truncate)
diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index d63b46a19f39..089a1acc48a8 100644
--- a/tools/objtool/include/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -97,4 +97,7 @@ int arch_rewrite_retpolines(struct objtool_file *file);
bool arch_pc_relative_reloc(struct reloc *reloc);
+unsigned int arch_reloc_size(struct reloc *reloc);
+unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table);
+
#endif /* _ARCH_H */
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
index fcca6662c8b4..0fafd0f7a209 100644
--- a/tools/objtool/include/objtool/builtin.h
+++ b/tools/objtool/include/objtool/builtin.h
@@ -29,15 +29,16 @@ struct opts {
/* options: */
bool backtrace;
- bool backup;
bool dryrun;
bool link;
bool mnop;
bool module;
bool no_unreachable;
+ const char *output;
bool sec_address;
bool stats;
bool verbose;
+ bool werror;
};
extern struct opts opts;
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
index d7e815c2fd15..223ac1c24b90 100644
--- a/tools/objtool/include/objtool/elf.h
+++ b/tools/objtool/include/objtool/elf.h
@@ -65,10 +65,10 @@ struct symbol {
u8 return_thunk : 1;
u8 fentry : 1;
u8 profiling_func : 1;
- u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
u8 frame_pointer : 1;
+ u8 warnings : 2;
struct list_head pv_target;
struct reloc *relocs;
};
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index ac04d3fe4dd9..e72b9d630551 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -43,8 +43,10 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define WARN(format, ...) \
fprintf(stderr, \
- "%s: warning: objtool: " format "\n", \
- objname, ##__VA_ARGS__)
+ "%s: %s: objtool: " format "\n", \
+ objname, \
+ opts.werror ? "error" : "warning", \
+ ##__VA_ARGS__)
#define WARN_FUNC(format, sec, offset, ...) \
({ \
@@ -53,14 +55,22 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \
})
+#define WARN_LIMIT 2
+
#define WARN_INSN(insn, format, ...) \
({ \
struct instruction *_insn = (insn); \
- if (!_insn->sym || !_insn->sym->warned) \
+ BUILD_BUG_ON(WARN_LIMIT > 2); \
+ if (!_insn->sym || _insn->sym->warnings < WARN_LIMIT) { \
WARN_FUNC(format, _insn->sec, _insn->offset, \
##__VA_ARGS__); \
- if (_insn->sym) \
- _insn->sym->warned = 1; \
+ if (_insn->sym) \
+ _insn->sym->warnings++; \
+ } else if (_insn->sym && _insn->sym->warnings == WARN_LIMIT) { \
+ WARN_FUNC("skipping duplicate warning(s)", \
+ _insn->sec, _insn->offset); \
+ _insn->sym->warnings++; \
+ } \
})
#define BT_INSN(insn, format, ...) \
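
The replacement of the one-bit "warned" flag with a two-bit "warnings" counter
gives each symbol WARN_LIMIT real warnings, one "skipping duplicate warning(s)"
notice, then silence; the BUILD_BUG_ON() guards against a limit the bitfield
cannot represent. A hypothetical self-contained demo of the same saturating
scheme:

/* Two real warnings, one "skipping" notice, then nothing: the 2-bit
 * counter saturates at 3. */
#include <stdio.h>

#define WARN_LIMIT 2

struct sym { unsigned warnings : 2; };

static void warn_insn(struct sym *s, const char *msg)
{
	if (s->warnings < WARN_LIMIT) {
		fprintf(stderr, "warning: %s\n", msg);
		s->warnings++;
	} else if (s->warnings == WARN_LIMIT) {
		fprintf(stderr, "warning: skipping duplicate warning(s)\n");
		s->warnings++;		/* now 3; later calls print nothing */
	}
}

int main(void)
{
	struct sym s = { 0 };

	for (int i = 0; i < 5; i++)
		warn_insn(&s, "bad instruction");
	return 0;
}
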
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 6bb7edda3094..eacfe3b0a8d1 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -16,6 +16,7 @@ NORETURN(__tdx_hypercall_failed)
NORETURN(__ubsan_handle_builtin_unreachable)
NORETURN(__x64_sys_exit)
NORETURN(__x64_sys_exit_group)
+NORETURN(acpi_processor_ffh_play_dead)
NORETURN(arch_cpu_idle_dead)
NORETURN(bch2_trans_in_restart_error)
NORETURN(bch2_trans_restart_error)
@@ -34,6 +35,7 @@ NORETURN(kunit_try_catch_throw)
NORETURN(machine_real_restart)
NORETURN(make_task_dead)
NORETURN(mpt_halt_firmware)
+NORETURN(mwait_play_dead)
NORETURN(nmi_panic_self_stop)
NORETURN(panic)
NORETURN(panic_smp_self_stop)
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index f40febdd6e36..1c73fb62fd57 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -18,87 +18,19 @@
bool help;
-const char *objname;
static struct objtool_file file;
-static bool objtool_create_backup(const char *_objname)
+struct objtool_file *objtool_open_read(const char *filename)
{
- int len = strlen(_objname);
- char *buf, *base, *name = malloc(len+6);
- int s, d, l, t;
-
- if (!name) {
- perror("failed backup name malloc");
- return false;
- }
-
- strcpy(name, _objname);
- strcpy(name + len, ".orig");
-
- d = open(name, O_CREAT|O_WRONLY|O_TRUNC, 0644);
- if (d < 0) {
- perror("failed to create backup file");
- return false;
- }
-
- s = open(_objname, O_RDONLY);
- if (s < 0) {
- perror("failed to open orig file");
- return false;
- }
-
- buf = malloc(4096);
- if (!buf) {
- perror("failed backup data malloc");
- return false;
- }
-
- while ((l = read(s, buf, 4096)) > 0) {
- base = buf;
- do {
- t = write(d, base, l);
- if (t < 0) {
- perror("failed backup write");
- return false;
- }
- base += t;
- l -= t;
- } while (l);
- }
-
- if (l < 0) {
- perror("failed backup read");
- return false;
- }
-
- free(name);
- free(buf);
- close(d);
- close(s);
-
- return true;
-}
-
-struct objtool_file *objtool_open_read(const char *_objname)
-{
- if (objname) {
- if (strcmp(objname, _objname)) {
- WARN("won't handle more than one file at a time");
- return NULL;
- }
- return &file;
+ if (file.elf) {
+ WARN("won't handle more than one file at a time");
+ return NULL;
}
- objname = _objname;
- file.elf = elf_open_read(objname, O_RDWR);
+ file.elf = elf_open_read(filename, O_RDWR);
if (!file.elf)
return NULL;
- if (opts.backup && !objtool_create_backup(objname)) {
- WARN("can't create backup file");
- return NULL;
- }
-
hash_init(file.insn_hash);
INIT_LIST_HEAD(&file.retpoline_call_list);
INIT_LIST_HEAD(&file.return_thunk_list);
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index a62247efb64f..05ef0e297837 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -10,7 +10,7 @@
#include <objtool/warn.h>
#include <objtool/endianness.h>
-int orc_dump(const char *_objname)
+int orc_dump(const char *filename)
{
int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0;
struct orc_entry *orc = NULL;
@@ -26,12 +26,9 @@ int orc_dump(const char *_objname)
Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
struct elf dummy_elf = {};
-
- objname = _objname;
-
elf_version(EV_CURRENT);
- fd = open(objname, O_RDONLY);
+ fd = open(filename, O_RDONLY);
if (fd == -1) {
perror("open");
return -1;
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index d3c6e10dce73..a4499e5a6f9c 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -26,8 +26,6 @@ FILES=(
"include/linux/hash.h"
"include/linux/list-sort.h"
"include/uapi/linux/hw_breakpoint.h"
- "arch/x86/include/asm/disabled-features.h"
- "arch/x86/include/asm/required-features.h"
"arch/x86/include/asm/cpufeatures.h"
"arch/x86/include/asm/inat_types.h"
"arch/x86/include/asm/emulate_prefix.h"
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 51a95239fe06..835123add0ed 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -52,8 +52,11 @@ DESTDIR ?=
# and _should_ modify the PACKAGE_BUGREPORT definition
VERSION:= $(shell ./utils/version-gen.sh)
-LIB_MAJ= 0.0.1
-LIB_MIN= 1
+LIB_FIX= 1
+LIB_MIN= 0
+LIB_MAJ= 1
+LIB_VER= $(LIB_MAJ).$(LIB_MIN).$(LIB_FIX)
+
PACKAGE = cpupower
PACKAGE_BUGREPORT = linux-pm@vger.kernel.org
@@ -200,14 +203,14 @@ $(OUTPUT)lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c
-$(OUTPUT)libcpupower.so.$(LIB_MAJ): $(LIB_OBJS)
+$(OUTPUT)libcpupower.so.$(LIB_VER): $(LIB_OBJS)
$(ECHO) " LD " $@
$(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
- -Wl,-soname,libcpupower.so.$(LIB_MIN) $(LIB_OBJS)
+ -Wl,-soname,libcpupower.so.$(LIB_MAJ) $(LIB_OBJS)
@ln -sf $(@F) $(OUTPUT)libcpupower.so
- @ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MIN)
+ @ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
-libcpupower: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
+libcpupower: $(OUTPUT)libcpupower.so.$(LIB_VER)
# Let all .o files depend on its .c file and all headers
# Might be worth to put this into utils/Makefile at some point of time
@@ -217,7 +220,7 @@ $(OUTPUT)%.o: %.c
$(ECHO) " CC " $@
$(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c
-$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
+$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)libcpupower.so.$(LIB_VER)
$(ECHO) " CC " $@
ifeq ($(strip $(STATIC)),true)
$(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lrt -lpci -L$(OUTPUT) -o $@
@@ -262,7 +265,7 @@ update-po: $(OUTPUT)po/$(PACKAGE).pot
done;
endif
-compile-bench: $(OUTPUT)libcpupower.so.$(LIB_MAJ)
+compile-bench: $(OUTPUT)libcpupower.so.$(LIB_VER)
@V=$(V) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT)
# we compile into subdirectories. if the target directory is not the
diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c
index 080678d9d74e..bd67c758b33a 100644
--- a/tools/power/cpupower/bench/parse.c
+++ b/tools/power/cpupower/bench/parse.c
@@ -121,6 +121,10 @@ out_dir:
struct config *prepare_default_config()
{
struct config *config = malloc(sizeof(struct config));
+ if (!config) {
+ perror("malloc");
+ return NULL;
+ }
dprintf("loading defaults\n");
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c
index 7a2ef691b20e..ce8dfb8e46ab 100644
--- a/tools/power/cpupower/lib/cpupower.c
+++ b/tools/power/cpupower/lib/cpupower.c
@@ -10,6 +10,7 @@
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
+#include <string.h>
#include "cpupower.h"
#include "cpupower_intern.h"
@@ -150,15 +151,25 @@ static int __compare(const void *t1, const void *t2)
return 0;
}
+static int __compare_core_cpu_list(const void *t1, const void *t2)
+{
+ struct cpuid_core_info *top1 = (struct cpuid_core_info *)t1;
+ struct cpuid_core_info *top2 = (struct cpuid_core_info *)t2;
+
+ return strcmp(top1->core_cpu_list, top2->core_cpu_list);
+}
+
/*
* Returns amount of cpus, negative on error, cpu_top must be
* passed to cpu_topology_release to free resources
*
- * Array is sorted after ->pkg, ->core, then ->cpu
+ * Array is sorted by ->core_cpu_list, then ->pkg and ->core
*/
int get_cpu_topology(struct cpupower_topology *cpu_top)
{
int cpu, last_pkg, cpus = sysconf(_SC_NPROCESSORS_CONF);
+ char path[SYSFS_PATH_MAX];
+ char *last_cpu_list;
cpu_top->core_info = malloc(sizeof(struct cpuid_core_info) * cpus);
if (cpu_top->core_info == NULL)
@@ -183,6 +194,34 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
cpu_top->core_info[cpu].core = -1;
continue;
}
+ if (cpu_top->core_info[cpu].core == -1) {
+ strncpy(cpu_top->core_info[cpu].core_cpu_list, "-1", CPULIST_BUFFER);
+ continue;
+ }
+ snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
+ cpu, "core_cpus_list");
+ if (cpupower_read_sysfs(
+ path,
+ cpu_top->core_info[cpu].core_cpu_list,
+ CPULIST_BUFFER) < 1) {
+ printf("Warning CPU%u has a 0 size core_cpus_list string", cpu);
+ }
+ }
+
+ /* Count the number of distinct cpu lists to get the physical core
+ * count.
+ */
+ qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
+ __compare_core_cpu_list);
+
+ last_cpu_list = cpu_top->core_info[0].core_cpu_list;
+ cpu_top->cores = 1;
+ for (cpu = 1; cpu < cpus; cpu++) {
+ if (strcmp(cpu_top->core_info[cpu].core_cpu_list, last_cpu_list) != 0 &&
+ cpu_top->core_info[cpu].pkg != -1) {
+ last_cpu_list = cpu_top->core_info[cpu].core_cpu_list;
+ cpu_top->cores++;
+ }
}
qsort(cpu_top->core_info, cpus, sizeof(struct cpuid_core_info),
@@ -203,13 +242,6 @@ int get_cpu_topology(struct cpupower_topology *cpu_top)
if (!(cpu_top->core_info[0].pkg == -1))
cpu_top->pkgs++;
- /* Intel's cores count is not consecutively numbered, there may
- * be a core_id of 3, but none of 2. Assume there always is 0
- * Get amount of cores by counting duplicates in a package
- for (cpu = 0; cpu_top->core_info[cpu].pkg = 0 && cpu < cpus; cpu++) {
- if (cpu_top->core_info[cpu].core == 0)
- cpu_top->cores++;
- */
return cpus;
}
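
The new core-counting approach works because, once sorted, each physical core
contributes exactly one run of identical core_cpus_list strings, so counting
transitions between distinct adjacent values yields the core count. A small
illustrative program with made-up SMT sibling lists:

/* Hypothetical demo: 8 logical CPUs as SMT pairs -> 4 physical cores. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp(const void *a, const void *b)
{
	return strcmp(*(const char * const *)a, *(const char * const *)b);
}

int main(void)
{
	const char *lists[] = { "0,4", "1,5", "0,4", "2,6",
				"1,5", "3,7", "2,6", "3,7" };
	size_t n = sizeof(lists) / sizeof(lists[0]);
	int cores = 1;

	qsort(lists, n, sizeof(lists[0]), cmp);
	for (size_t i = 1; i < n; i++)
		if (strcmp(lists[i], lists[i - 1]) != 0)
			cores++;

	printf("%d physical cores\n", cores);	/* prints 4 */
	return 0;
}
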
diff --git a/tools/power/cpupower/lib/cpupower.h b/tools/power/cpupower/lib/cpupower.h
index e4e4292eacec..2e67a080f203 100644
--- a/tools/power/cpupower/lib/cpupower.h
+++ b/tools/power/cpupower/lib/cpupower.h
@@ -2,6 +2,8 @@
#ifndef __CPUPOWER_CPUPOWER_H__
#define __CPUPOWER_CPUPOWER_H__
+#define CPULIST_BUFFER 5
+
struct cpupower_topology {
/* Amount of CPU cores, packages and threads per core in the system */
unsigned int cores;
@@ -16,6 +18,7 @@ struct cpuid_core_info {
int pkg;
int core;
int cpu;
+ char core_cpu_list[CPULIST_BUFFER];
/* flags */
unsigned int is_online:1;
diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
index f746099b5dac..ad493157f826 100644
--- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
@@ -6,6 +6,7 @@
*/
+#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
@@ -91,7 +92,11 @@ int fill_string_with_spaces(char *s, int n)
return 0;
}
-#define MAX_COL_WIDTH 6
+#define MAX_COL_WIDTH 6
+#define TOPOLOGY_DEPTH_PKG 3
+#define TOPOLOGY_DEPTH_CORE 2
+#define TOPOLOGY_DEPTH_CPU 1
+
void print_header(int topology_depth)
{
int unsigned mon;
@@ -113,12 +118,19 @@ void print_header(int topology_depth)
}
printf("\n");
- if (topology_depth > 2)
+ switch (topology_depth) {
+ case TOPOLOGY_DEPTH_PKG:
printf(" PKG|");
- if (topology_depth > 1)
+ break;
+ case TOPOLOGY_DEPTH_CORE:
printf("CORE|");
- if (topology_depth > 0)
+ break;
+ case TOPOLOGY_DEPTH_CPU:
printf(" CPU|");
+ break;
+ default:
+ return;
+ }
for (mon = 0; mon < avail_monitors; mon++) {
if (mon != 0)
@@ -152,12 +164,19 @@ void print_results(int topology_depth, int cpu)
cpu_top.core_info[cpu].pkg == -1)
return;
- if (topology_depth > 2)
+ switch (topology_depth) {
+ case TOPOLOGY_DEPTH_PKG:
printf("%4d|", cpu_top.core_info[cpu].pkg);
- if (topology_depth > 1)
+ break;
+ case TOPOLOGY_DEPTH_CORE:
printf("%4d|", cpu_top.core_info[cpu].core);
- if (topology_depth > 0)
+ break;
+ case TOPOLOGY_DEPTH_CPU:
printf("%4d|", cpu_top.core_info[cpu].cpu);
+ break;
+ default:
+ return;
+ }
for (mon = 0; mon < avail_monitors; mon++) {
if (mon != 0)
@@ -294,7 +313,10 @@ int fork_it(char **argv)
if (!child_pid) {
/* child */
- execvp(argv[0], argv);
+ if (execvp(argv[0], argv) == -1) {
+ printf("Invalid monitor command %s\n", argv[0]);
+ exit(errno);
+ }
} else {
/* parent */
if (child_pid == -1) {
@@ -423,11 +445,13 @@ int cmd_monitor(int argc, char **argv)
if (avail_monitors == 0) {
printf(_("No HW Cstate monitors found\n"));
+ cpu_topology_release(cpu_top);
return 1;
}
if (mode == list) {
list_monitors();
+ cpu_topology_release(cpu_top);
exit(EXIT_SUCCESS);
}
@@ -448,15 +472,15 @@ int cmd_monitor(int argc, char **argv)
/* ToDo: Topology parsing needs fixing first to do
this more generically */
if (cpu_top.pkgs > 1)
- print_header(3);
+ print_header(TOPOLOGY_DEPTH_PKG);
else
- print_header(1);
+ print_header(TOPOLOGY_DEPTH_CPU);
for (cpu = 0; cpu < cpu_count; cpu++) {
if (cpu_top.pkgs > 1)
- print_results(3, cpu);
+ print_results(TOPOLOGY_DEPTH_PKG, cpu);
else
- print_results(1, cpu);
+ print_results(TOPOLOGY_DEPTH_CPU, cpu);
}
for (num = 0; num < avail_monitors; num++) {
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8d5011a0bf60..26057af6b5a1 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1056,7 +1056,7 @@ static const struct platform_data turbostat_pdata[] = {
* Missing support for
* INTEL_ICELAKE
* INTEL_ATOM_SILVERMONT_MID
- * INTEL_ATOM_AIRMONT_MID
+ * INTEL_ATOM_SILVERMONT_MID2
* INTEL_ATOM_AIRMONT_NP
*/
{ 0, NULL },
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 7849405614b1..dc4333d23189 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -7,6 +7,13 @@
#ifndef __SCX_COMMON_BPF_H
#define __SCX_COMMON_BPF_H
+/*
+ * The generated kfunc prototypes in vmlinux.h are missing address space
+ * attributes, which causes build failures. For now, suppress the generated
+ * prototypes. See https://github.com/sched-ext/scx/issues/1111.
+ */
+#define BPF_NO_KFUNC_PROTOTYPES
+
#ifdef LSP
#define __bpf__
#include "../vmlinux.h"
@@ -18,6 +25,7 @@
#include <bpf/bpf_tracing.h>
#include <asm-generic/errno.h>
#include "user_exit_info.h"
+#include "enum_defs.autogen.h"
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
@@ -62,21 +70,28 @@ void scx_bpf_dump_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym
u32 scx_bpf_cpuperf_cap(s32 cpu) __ksym __weak;
u32 scx_bpf_cpuperf_cur(s32 cpu) __ksym __weak;
void scx_bpf_cpuperf_set(s32 cpu, u32 perf) __ksym __weak;
+u32 scx_bpf_nr_node_ids(void) __ksym __weak;
u32 scx_bpf_nr_cpu_ids(void) __ksym __weak;
+int scx_bpf_cpu_node(s32 cpu) __ksym __weak;
const struct cpumask *scx_bpf_get_possible_cpumask(void) __ksym __weak;
const struct cpumask *scx_bpf_get_online_cpumask(void) __ksym __weak;
void scx_bpf_put_cpumask(const struct cpumask *cpumask) __ksym __weak;
+const struct cpumask *scx_bpf_get_idle_cpumask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_cpumask(void) __ksym;
+const struct cpumask *scx_bpf_get_idle_smtmask_node(int node) __ksym __weak;
const struct cpumask *scx_bpf_get_idle_smtmask(void) __ksym;
void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym;
bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym;
+s32 scx_bpf_pick_idle_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
+s32 scx_bpf_pick_any_cpu_node(const cpumask_t *cpus_allowed, int node, u64 flags) __ksym __weak;
s32 scx_bpf_pick_any_cpu(const cpumask_t *cpus_allowed, u64 flags) __ksym;
bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak;
u64 scx_bpf_now(void) __ksym __weak;
+void scx_bpf_events(struct scx_event_stats *events, size_t events__sz) __ksym __weak;
/*
* Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from
@@ -84,6 +99,9 @@ u64 scx_bpf_now(void) __ksym __weak;
*/
#define BPF_FOR_EACH_ITER (&___it)
+#define scx_read_event(e, name) \
+ (bpf_core_field_exists((e)->name) ? (e)->name : 0)
+
static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
@@ -584,6 +602,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
__u.__val; \
})
+#define READ_ONCE_ARENA(type, x) \
+({ \
+ union { type __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ __read_once_size((void *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
+#define WRITE_ONCE_ARENA(type, x, val) \
+({ \
+ union { type __val; char __c[1]; } __u = \
+ { .__val = (val) }; \
+ __write_once_size((void *)&(x), __u.__c, sizeof(x)); \
+ __u.__val; \
+})
+
/*
* log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
* @v: The value for which we're computing the base 2 logarithm.
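
scx_read_event() uses bpf_core_field_exists() so a scheduler binary keeps
loading on kernels whose struct scx_event_stats lacks a given counter, reading
0 instead. A plain-C analogue of the pattern (FIELD_EXISTS stands in for the
CO-RE check, which BPF resolves at load time):

/* Hypothetical analogue: read a field only if the kernel declares it. */
#include <stdio.h>

struct event_stats { long long select_cpu_fallback; };

#define FIELD_EXISTS 1	/* stands in for bpf_core_field_exists(...) */
#define read_event(e, name) (FIELD_EXISTS ? (e)->name : 0)

int main(void)
{
	struct event_stats ev = { .select_cpu_fallback = 42 };

	printf("%lld\n", read_event(&ev, select_cpu_fallback));
	return 0;
}
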
diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h
index dc18b99e55cd..1dc76bd84296 100644
--- a/tools/sched_ext/include/scx/common.h
+++ b/tools/sched_ext/include/scx/common.h
@@ -16,6 +16,7 @@
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
+#include "enum_defs.autogen.h"
typedef uint8_t u8;
typedef uint16_t u16;
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index 50e1499ae093..9252e1a00556 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -125,12 +125,107 @@ bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter,
false; \
})
+/**
+ * __COMPAT_is_enq_cpu_selected - Test if SCX_ENQ_CPU_SELECTED is on
+ * in a compatible way. We will preserve this __COMPAT helper until v6.16.
+ *
+ * @enq_flags: enqueue flags from ops.enqueue()
+ *
+ * Return: True if SCX_ENQ_CPU_SELECTED is turned on in @enq_flags
+ */
+static inline bool __COMPAT_is_enq_cpu_selected(u64 enq_flags)
+{
+#ifdef HAVE_SCX_ENQ_CPU_SELECTED
+ /*
+ * This is the case where the BPF code was compiled against a
+ * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED exists.
+ */
+
+ /*
+ * Temporarily suspend macro expansion of 'SCX_ENQ_CPU_SELECTED' so
+ * that it is not rewritten to '__SCX_ENQ_CPU_SELECTED' by the macro
+ * that 'scripts/gen_enums.py' defines for it.
+ */
+#pragma push_macro("SCX_ENQ_CPU_SELECTED")
+#undef SCX_ENQ_CPU_SELECTED
+ u64 flag;
+
+ /*
+ * On kernels without SCX_ENQ_CPU_SELECTED, select_task_rq_scx()
+ * was never skipped, so treat this case as if the CPU has
+ * already been selected.
+ */
+ if (!bpf_core_enum_value_exists(enum scx_enq_flags,
+ SCX_ENQ_CPU_SELECTED))
+ return true;
+
+ flag = bpf_core_enum_value(enum scx_enq_flags, SCX_ENQ_CPU_SELECTED);
+ return enq_flags & flag;
+
+ /*
+ * Once done, resume the macro expansion of 'SCX_ENQ_CPU_SELECTED'.
+ */
+#pragma pop_macro("SCX_ENQ_CPU_SELECTED")
+#else
+ /*
+ * This is the case where the BPF code was compiled against a
+ * vmlinux.h in which the enum SCX_ENQ_CPU_SELECTED does NOT exist.
+ */
+ return true;
+#endif /* HAVE_SCX_ENQ_CPU_SELECTED */
+}
+
+
#define scx_bpf_now() \
(bpf_ksym_exists(scx_bpf_now) ? \
scx_bpf_now() : \
bpf_ktime_get_ns())
/*
+ * v6.15: Introduce event counters.
+ *
+ * Preserve the following macro until v6.17.
+ */
+#define __COMPAT_scx_bpf_events(events, size) \
+ (bpf_ksym_exists(scx_bpf_events) ? \
+ scx_bpf_events(events, size) : ({}))
+
+/*
+ * v6.15: Introduce NUMA-aware kfuncs to operate with per-node idle
+ * cpumasks.
+ *
+ * Preserve the following __COMPAT_scx_*_node macros until v6.17.
+ */
+#define __COMPAT_scx_bpf_nr_node_ids() \
+ (bpf_ksym_exists(scx_bpf_nr_node_ids) ? \
+ scx_bpf_nr_node_ids() : 1U)
+
+#define __COMPAT_scx_bpf_cpu_node(cpu) \
+ (bpf_ksym_exists(scx_bpf_cpu_node) ? \
+ scx_bpf_cpu_node(cpu) : 0)
+
+#define __COMPAT_scx_bpf_get_idle_cpumask_node(node) \
+ (bpf_ksym_exists(scx_bpf_get_idle_cpumask_node) ? \
+ scx_bpf_get_idle_cpumask_node(node) : \
+ scx_bpf_get_idle_cpumask()) \
+
+#define __COMPAT_scx_bpf_get_idle_smtmask_node(node) \
+ (bpf_ksym_exists(scx_bpf_get_idle_smtmask_node) ? \
+ scx_bpf_get_idle_smtmask_node(node) : \
+ scx_bpf_get_idle_smtmask())
+
+#define __COMPAT_scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) \
+ (bpf_ksym_exists(scx_bpf_pick_idle_cpu_node) ? \
+ scx_bpf_pick_idle_cpu_node(cpus_allowed, node, flags) : \
+ scx_bpf_pick_idle_cpu(cpus_allowed, flags))
+
+#define __COMPAT_scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) \
+ (bpf_ksym_exists(scx_bpf_pick_any_cpu_node) ? \
+ scx_bpf_pick_any_cpu_node(cpus_allowed, node, flags) : \
+ scx_bpf_pick_any_cpu(cpus_allowed, flags))
+
+/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
*/
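
All of these __COMPAT_* wrappers follow one pattern: probe for the new kfunc
with bpf_ksym_exists() and fall back to the older, less precise call when it
is absent. A plain-C analogue (the flag variable stands in for the load-time
ksym check):

/* Hypothetical analogue of __COMPAT_scx_bpf_nr_node_ids(). */
#include <stdio.h>

static int new_kfunc_present;	/* stands in for bpf_ksym_exists(sym) */

static unsigned int nr_node_ids_new(void) { return 4; }

#define compat_nr_node_ids() \
	(new_kfunc_present ? nr_node_ids_new() : 1U)

int main(void)
{
	new_kfunc_present = 0;
	printf("%u\n", compat_nr_node_ids());	/* 1 on old kernels */

	new_kfunc_present = 1;
	printf("%u\n", compat_nr_node_ids());	/* 4 once the kfunc exists */
	return 0;
}
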
diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h
index b50280e2ba2b..35c67c5174ac 100644
--- a/tools/sched_ext/include/scx/compat.h
+++ b/tools/sched_ext/include/scx/compat.h
@@ -106,8 +106,20 @@ static inline bool __COMPAT_struct_has_field(const char *type, const char *field
return false;
}
-#define SCX_OPS_SWITCH_PARTIAL \
- __COMPAT_ENUM_OR_ZERO("scx_ops_flags", "SCX_OPS_SWITCH_PARTIAL")
+#define SCX_OPS_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_ops_flags", #name)
+
+#define SCX_OPS_KEEP_BUILTIN_IDLE SCX_OPS_FLAG(SCX_OPS_KEEP_BUILTIN_IDLE)
+#define SCX_OPS_ENQ_LAST SCX_OPS_FLAG(SCX_OPS_ENQ_LAST)
+#define SCX_OPS_ENQ_EXITING SCX_OPS_FLAG(SCX_OPS_ENQ_EXITING)
+#define SCX_OPS_SWITCH_PARTIAL SCX_OPS_FLAG(SCX_OPS_SWITCH_PARTIAL)
+#define SCX_OPS_ENQ_MIGRATION_DISABLED SCX_OPS_FLAG(SCX_OPS_ENQ_MIGRATION_DISABLED)
+#define SCX_OPS_ALLOW_QUEUED_WAKEUP SCX_OPS_FLAG(SCX_OPS_ALLOW_QUEUED_WAKEUP)
+#define SCX_OPS_BUILTIN_IDLE_PER_NODE SCX_OPS_FLAG(SCX_OPS_BUILTIN_IDLE_PER_NODE)
+
+#define SCX_PICK_IDLE_FLAG(name) __COMPAT_ENUM_OR_ZERO("scx_pick_idle_cpu_flags", #name)
+
+#define SCX_PICK_IDLE_CORE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_CORE)
+#define SCX_PICK_IDLE_IN_NODE SCX_PICK_IDLE_FLAG(SCX_PICK_IDLE_IN_NODE)
static inline long scx_hotplug_seq(void)
{
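
Because __COMPAT_ENUM_OR_ZERO() evaluates to 0 for enum values the running
kernel does not define, these flag wrappers can be OR'd into ops flags
unconditionally; unknown flags become no-ops. A hypothetical demo of that
"enum value or zero" behavior:

/* Hypothetical demo: OR-ing an unknown flag changes nothing. */
#include <stdio.h>

static unsigned long long enum_or_zero(int exists, unsigned long long val)
{
	return exists ? val : 0;  /* stands in for __COMPAT_ENUM_OR_ZERO() */
}

int main(void)
{
	unsigned long long flags = 0;

	flags |= enum_or_zero(1, 1ULL << 3);	/* supported flag */
	flags |= enum_or_zero(0, 1ULL << 5);	/* unknown flag -> no-op */

	printf("%#llx\n", flags);		/* prints 0x8 */
	return 0;
}
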
diff --git a/tools/sched_ext/include/scx/enum_defs.autogen.h b/tools/sched_ext/include/scx/enum_defs.autogen.h
new file mode 100644
index 000000000000..6e6c45f14fe1
--- /dev/null
+++ b/tools/sched_ext/include/scx/enum_defs.autogen.h
@@ -0,0 +1,120 @@
+/*
+ * WARNING: This file is autogenerated from gen_enum_defs.py [1].
+ *
+ * [1] https://github.com/sched-ext/scx/blob/main/scripts/gen_enum_defs.py
+ */
+
+#ifndef __ENUM_DEFS_AUTOGEN_H__
+#define __ENUM_DEFS_AUTOGEN_H__
+
+#define HAVE_SCX_DSP_DFL_MAX_BATCH
+#define HAVE_SCX_DSP_MAX_LOOPS
+#define HAVE_SCX_WATCHDOG_MAX_TIMEOUT
+#define HAVE_SCX_EXIT_BT_LEN
+#define HAVE_SCX_EXIT_MSG_LEN
+#define HAVE_SCX_EXIT_DUMP_DFL_LEN
+#define HAVE_SCX_CPUPERF_ONE
+#define HAVE_SCX_OPS_TASK_ITER_BATCH
+#define HAVE_SCX_CPU_PREEMPT_RT
+#define HAVE_SCX_CPU_PREEMPT_DL
+#define HAVE_SCX_CPU_PREEMPT_STOP
+#define HAVE_SCX_CPU_PREEMPT_UNKNOWN
+#define HAVE_SCX_DEQ_SLEEP
+#define HAVE_SCX_DEQ_CORE_SCHED_EXEC
+#define HAVE_SCX_DSQ_FLAG_BUILTIN
+#define HAVE_SCX_DSQ_FLAG_LOCAL_ON
+#define HAVE_SCX_DSQ_INVALID
+#define HAVE_SCX_DSQ_GLOBAL
+#define HAVE_SCX_DSQ_LOCAL
+#define HAVE_SCX_DSQ_LOCAL_ON
+#define HAVE_SCX_DSQ_LOCAL_CPU_MASK
+#define HAVE_SCX_DSQ_ITER_REV
+#define HAVE___SCX_DSQ_ITER_HAS_SLICE
+#define HAVE___SCX_DSQ_ITER_HAS_VTIME
+#define HAVE___SCX_DSQ_ITER_USER_FLAGS
+#define HAVE___SCX_DSQ_ITER_ALL_FLAGS
+#define HAVE_SCX_DSQ_LNODE_ITER_CURSOR
+#define HAVE___SCX_DSQ_LNODE_PRIV_SHIFT
+#define HAVE_SCX_ENQ_WAKEUP
+#define HAVE_SCX_ENQ_HEAD
+#define HAVE_SCX_ENQ_CPU_SELECTED
+#define HAVE_SCX_ENQ_PREEMPT
+#define HAVE_SCX_ENQ_REENQ
+#define HAVE_SCX_ENQ_LAST
+#define HAVE___SCX_ENQ_INTERNAL_MASK
+#define HAVE_SCX_ENQ_CLEAR_OPSS
+#define HAVE_SCX_ENQ_DSQ_PRIQ
+#define HAVE_SCX_TASK_DSQ_ON_PRIQ
+#define HAVE_SCX_TASK_QUEUED
+#define HAVE_SCX_TASK_RESET_RUNNABLE_AT
+#define HAVE_SCX_TASK_DEQD_FOR_SLEEP
+#define HAVE_SCX_TASK_STATE_SHIFT
+#define HAVE_SCX_TASK_STATE_BITS
+#define HAVE_SCX_TASK_STATE_MASK
+#define HAVE_SCX_TASK_CURSOR
+#define HAVE_SCX_ECODE_RSN_HOTPLUG
+#define HAVE_SCX_ECODE_ACT_RESTART
+#define HAVE_SCX_EXIT_NONE
+#define HAVE_SCX_EXIT_DONE
+#define HAVE_SCX_EXIT_UNREG
+#define HAVE_SCX_EXIT_UNREG_BPF
+#define HAVE_SCX_EXIT_UNREG_KERN
+#define HAVE_SCX_EXIT_SYSRQ
+#define HAVE_SCX_EXIT_ERROR
+#define HAVE_SCX_EXIT_ERROR_BPF
+#define HAVE_SCX_EXIT_ERROR_STALL
+#define HAVE_SCX_KF_UNLOCKED
+#define HAVE_SCX_KF_CPU_RELEASE
+#define HAVE_SCX_KF_DISPATCH
+#define HAVE_SCX_KF_ENQUEUE
+#define HAVE_SCX_KF_SELECT_CPU
+#define HAVE_SCX_KF_REST
+#define HAVE___SCX_KF_RQ_LOCKED
+#define HAVE___SCX_KF_TERMINAL
+#define HAVE_SCX_KICK_IDLE
+#define HAVE_SCX_KICK_PREEMPT
+#define HAVE_SCX_KICK_WAIT
+#define HAVE_SCX_OPI_BEGIN
+#define HAVE_SCX_OPI_NORMAL_BEGIN
+#define HAVE_SCX_OPI_NORMAL_END
+#define HAVE_SCX_OPI_CPU_HOTPLUG_BEGIN
+#define HAVE_SCX_OPI_CPU_HOTPLUG_END
+#define HAVE_SCX_OPI_END
+#define HAVE_SCX_OPS_ENABLING
+#define HAVE_SCX_OPS_ENABLED
+#define HAVE_SCX_OPS_DISABLING
+#define HAVE_SCX_OPS_DISABLED
+#define HAVE_SCX_OPS_KEEP_BUILTIN_IDLE
+#define HAVE_SCX_OPS_ENQ_LAST
+#define HAVE_SCX_OPS_ENQ_EXITING
+#define HAVE_SCX_OPS_SWITCH_PARTIAL
+#define HAVE_SCX_OPS_HAS_CGROUP_WEIGHT
+#define HAVE_SCX_OPS_ALL_FLAGS
+#define HAVE_SCX_OPSS_NONE
+#define HAVE_SCX_OPSS_QUEUEING
+#define HAVE_SCX_OPSS_QUEUED
+#define HAVE_SCX_OPSS_DISPATCHING
+#define HAVE_SCX_OPSS_QSEQ_SHIFT
+#define HAVE_SCX_PICK_IDLE_CORE
+#define HAVE_SCX_OPS_NAME_LEN
+#define HAVE_SCX_SLICE_DFL
+#define HAVE_SCX_SLICE_INF
+#define HAVE_SCX_RQ_ONLINE
+#define HAVE_SCX_RQ_CAN_STOP_TICK
+#define HAVE_SCX_RQ_BAL_PENDING
+#define HAVE_SCX_RQ_BAL_KEEP
+#define HAVE_SCX_RQ_BYPASSING
+#define HAVE_SCX_RQ_IN_WAKEUP
+#define HAVE_SCX_RQ_IN_BALANCE
+#define HAVE_SCX_TASK_NONE
+#define HAVE_SCX_TASK_INIT
+#define HAVE_SCX_TASK_READY
+#define HAVE_SCX_TASK_ENABLED
+#define HAVE_SCX_TASK_NR_STATES
+#define HAVE_SCX_TG_ONLINE
+#define HAVE_SCX_TG_INITED
+#define HAVE_SCX_WAKE_FORK
+#define HAVE_SCX_WAKE_TTWU
+#define HAVE_SCX_WAKE_SYNC
+
+#endif /* __ENUM_DEFS_AUTOGEN_H__ */
diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
index 1e9f74525d8f..6ba6e610eeaa 100644
--- a/tools/sched_ext/scx_central.c
+++ b/tools/sched_ext/scx_central.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>
+#include <assert.h>
#include <libgen.h>
#include <bpf/bpf.h>
#include <scx/common.h>
@@ -60,14 +61,22 @@ restart:
skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
+ assert(skel->rodata->nr_cpu_ids <= INT32_MAX);
+
while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
switch (opt) {
case 's':
skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000;
break;
- case 'c':
- skel->rodata->central_cpu = strtoul(optarg, NULL, 0);
+ case 'c': {
+ u32 central_cpu = strtoul(optarg, NULL, 0);
+ if (central_cpu >= skel->rodata->nr_cpu_ids) {
+ fprintf(stderr, "invalid central CPU id value, %u given (%u max)\n", central_cpu, skel->rodata->nr_cpu_ids);
+ return -1;
+ }
+ skel->rodata->central_cpu = (s32)central_cpu;
break;
+ }
case 'v':
verbose = true;
break;
@@ -96,7 +105,7 @@ restart:
*/
cpuset = CPU_ALLOC(skel->rodata->nr_cpu_ids);
SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
- CPU_ZERO(cpuset);
+ CPU_ZERO_S(CPU_ALLOC_SIZE(skel->rodata->nr_cpu_ids), cpuset);
CPU_SET(skel->rodata->central_cpu, cpuset);
SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
"Failed to affinitize to central CPU %d (max %d)",
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 3a20bb0c014a..26c40ca4f36c 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -231,7 +231,7 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags)
}
/* if select_cpu() wasn't called, try direct dispatch */
- if (!(enq_flags & SCX_ENQ_CPU_SELECTED) &&
+ if (!__COMPAT_is_enq_cpu_selected(enq_flags) &&
(cpu = pick_direct_dispatch_cpu(p, scx_bpf_task_cpu(p))) >= 0) {
__sync_fetch_and_add(&nr_ddsp_from_enq, 1);
scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, slice_ns, enq_flags);
@@ -763,6 +763,8 @@ static void dump_shared_dsq(void)
static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
{
+ struct scx_event_stats events;
+
bpf_rcu_read_lock();
dispatch_highpri(true);
bpf_rcu_read_unlock();
@@ -772,6 +774,25 @@ static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
if (print_shared_dsq)
dump_shared_dsq();
+ __COMPAT_scx_bpf_events(&events, sizeof(events));
+
+ bpf_printk("%35s: %lld", "SCX_EV_SELECT_CPU_FALLBACK",
+ scx_read_event(&events, SCX_EV_SELECT_CPU_FALLBACK));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE",
+ scx_read_event(&events, SCX_EV_DISPATCH_LOCAL_DSQ_OFFLINE));
+ bpf_printk("%35s: %lld", "SCX_EV_DISPATCH_KEEP_LAST",
+ scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
+ bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
+ scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
+ bpf_printk("%35s: %lld", "SCX_EV_ENQ_SLICE_DFL",
+ scx_read_event(&events, SCX_EV_ENQ_SLICE_DFL));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
+ scx_read_event(&events, SCX_EV_BYPASS_DURATION));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",
+ scx_read_event(&events, SCX_EV_BYPASS_DISPATCH));
+ bpf_printk("%35s: %lld", "SCX_EV_BYPASS_ACTIVATE",
+ scx_read_event(&events, SCX_EV_BYPASS_ACTIVATE));
+
bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
return 0;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 8daac70c2f9d..2ebaf5e6942e 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -35,6 +35,7 @@ TARGETS += filesystems/epoll
TARGETS += filesystems/fat
TARGETS += filesystems/overlayfs
TARGETS += filesystems/statmount
+TARGETS += filesystems/mount-notify
TARGETS += firmware
TARGETS += fpu
TARGETS += ftrace
diff --git a/tools/testing/selftests/arm64/fp/kernel-test.c b/tools/testing/selftests/arm64/fp/kernel-test.c
index 348e8bef62c7..e3cec3723ffa 100644
--- a/tools/testing/selftests/arm64/fp/kernel-test.c
+++ b/tools/testing/selftests/arm64/fp/kernel-test.c
@@ -46,7 +46,6 @@ static void handle_kick_signal(int sig, siginfo_t *info, void *context)
}
static char *drivers[] = {
- "crct10dif-arm64",
"sha1-ce",
"sha224-arm64",
"sha224-arm64-neon",
diff --git a/tools/testing/selftests/arm64/mte/check_hugetlb_options.c b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
index 303260a6dc65..3bfcd3848432 100644
--- a/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
+++ b/tools/testing/selftests/arm64/mte/check_hugetlb_options.c
@@ -227,6 +227,8 @@ static int check_child_hugetlb_memory_mapping(int mem_type, int mode, int mappin
int main(int argc, char *argv[])
{
int err;
+ void *map_ptr;
+ unsigned long map_size;
err = mte_default_setup();
if (err)
@@ -243,6 +245,15 @@ int main(int argc, char *argv[])
return KSFT_FAIL;
}
+ /* Check if MTE supports hugetlb mappings */
+ map_size = default_huge_page_size();
+ map_ptr = mmap(NULL, map_size, PROT_READ | PROT_MTE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+ if (map_ptr == MAP_FAILED)
+ ksft_exit_skip("PROT_MTE not supported with MAP_HUGETLB mappings\n");
+ else
+ munmap(map_ptr, map_size);
+
/* Set test plan */
ksft_set_plan(12);
@@ -270,13 +281,13 @@ int main(int argc, char *argv[])
"Check clear PROT_MTE flags with private mapping and sync error mode and mmap/mprotect memory\n");
evaluate_test(check_child_hugetlb_memory_mapping(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
- "Check child hugetlb memory with private mapping, precise mode and mmap memory\n");
+ "Check child hugetlb memory with private mapping, sync error mode and mmap memory\n");
evaluate_test(check_child_hugetlb_memory_mapping(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
- "Check child hugetlb memory with private mapping, precise mode and mmap memory\n");
+ "Check child hugetlb memory with private mapping, async error mode and mmap memory\n");
evaluate_test(check_child_hugetlb_memory_mapping(USE_MPROTECT, MTE_SYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
- "Check child hugetlb memory with private mapping, precise mode and mmap/mprotect memory\n");
+ "Check child hugetlb memory with private mapping, sync error mode and mmap/mprotect memory\n");
evaluate_test(check_child_hugetlb_memory_mapping(USE_MPROTECT, MTE_ASYNC_ERR, MAP_PRIVATE | MAP_HUGETLB),
- "Check child hugetlb memory with private mapping, precise mode and mmap/mprotect memory\n");
+ "Check child hugetlb memory with private mapping, async error mode and mmap/mprotect memory\n");
mte_restore_setup();
free_hugetlb();
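
The probe added above attempts a single PROT_MTE hugetlb mapping and skips the
whole program when the kernel refuses it, rather than failing twelve planned
tests. A hypothetical standalone version of the probe-then-skip idiom (the
PROT_MTE fallback value and the 2M huge page size are assumptions):

/* Probe once, skip cleanly if the combination is unsupported. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20		/* arm64 value; placeholder elsewhere */
#endif

int main(void)
{
	size_t sz = 2UL << 20;	/* assume a 2M default huge page */
	void *p = mmap(NULL, sz, PROT_READ | PROT_MTE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		puts("skip: PROT_MTE not supported with MAP_HUGETLB");
		return 4;	/* KSFT_SKIP */
	}
	munmap(p, sz);
	puts("probe ok");
	return 0;
}
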
diff --git a/tools/testing/selftests/filesystems/mount-notify/.gitignore b/tools/testing/selftests/filesystems/mount-notify/.gitignore
new file mode 100644
index 000000000000..82a4846cbc4b
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/*_test
diff --git a/tools/testing/selftests/filesystems/mount-notify/Makefile b/tools/testing/selftests/filesystems/mount-notify/Makefile
new file mode 100644
index 000000000000..10be0227b5ae
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+CFLAGS += -Wall -O2 -g $(KHDR_INCLUDES)
+TEST_GEN_PROGS := mount-notify_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
new file mode 100644
index 000000000000..4a2d5c454fd1
--- /dev/null
+++ b/tools/testing/selftests/filesystems/mount-notify/mount-notify_test.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (c) 2025 Miklos Szeredi <miklos@szeredi.hu>
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/mount.h>
+#include <linux/fanotify.h>
+#include <unistd.h>
+#include <sys/fanotify.h>
+#include <sys/syscall.h>
+
+#include "../../kselftest_harness.h"
+#include "../statmount/statmount.h"
+
+#ifndef FAN_MNT_ATTACH
+struct fanotify_event_info_mnt {
+ struct fanotify_event_info_header hdr;
+ __u64 mnt_id;
+};
+#define FAN_MNT_ATTACH 0x01000000 /* Mount was attached */
+#endif
+
+#ifndef FAN_MNT_DETACH
+#define FAN_MNT_DETACH 0x02000000 /* Mount was detached */
+#endif
+
+#ifndef FAN_REPORT_MNT
+#define FAN_REPORT_MNT 0x00004000 /* Report mount events */
+#endif
+
+#ifndef FAN_MARK_MNTNS
+#define FAN_MARK_MNTNS 0x00000110
+#endif
+
+static uint64_t get_mnt_id(struct __test_metadata *const _metadata,
+ const char *path)
+{
+ struct statx sx;
+
+ ASSERT_EQ(statx(AT_FDCWD, path, 0, STATX_MNT_ID_UNIQUE, &sx), 0);
+ ASSERT_TRUE(!!(sx.stx_mask & STATX_MNT_ID_UNIQUE));
+ return sx.stx_mnt_id;
+}
+
+static const char root_mntpoint_templ[] = "/tmp/mount-notify_test_root.XXXXXX";
+
+FIXTURE(fanotify) {
+ int fan_fd;
+ char buf[256];
+ unsigned int rem;
+ void *next;
+ char root_mntpoint[sizeof(root_mntpoint_templ)];
+ int orig_root;
+ int ns_fd;
+ uint64_t root_id;
+};
+
+FIXTURE_SETUP(fanotify)
+{
+ int ret;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ self->ns_fd = open("/proc/self/ns/mnt", O_RDONLY);
+ ASSERT_GE(self->ns_fd, 0);
+
+ ASSERT_EQ(mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL), 0);
+
+ strcpy(self->root_mntpoint, root_mntpoint_templ);
+ ASSERT_NE(mkdtemp(self->root_mntpoint), NULL);
+
+ self->orig_root = open("/", O_PATH | O_CLOEXEC);
+ ASSERT_GE(self->orig_root, 0);
+
+ ASSERT_EQ(mount("tmpfs", self->root_mntpoint, "tmpfs", 0, NULL), 0);
+
+ ASSERT_EQ(chroot(self->root_mntpoint), 0);
+
+ ASSERT_EQ(chdir("/"), 0);
+
+ ASSERT_EQ(mkdir("a", 0700), 0);
+
+ ASSERT_EQ(mkdir("b", 0700), 0);
+
+ self->root_id = get_mnt_id(_metadata, "/");
+ ASSERT_NE(self->root_id, 0);
+
+ self->fan_fd = fanotify_init(FAN_REPORT_MNT, 0);
+ ASSERT_GE(self->fan_fd, 0);
+
+ ret = fanotify_mark(self->fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS,
+ FAN_MNT_ATTACH | FAN_MNT_DETACH, self->ns_fd, NULL);
+ ASSERT_EQ(ret, 0);
+
+ self->rem = 0;
+}
+
+FIXTURE_TEARDOWN(fanotify)
+{
+ ASSERT_EQ(self->rem, 0);
+ close(self->fan_fd);
+
+ ASSERT_EQ(fchdir(self->orig_root), 0);
+
+ ASSERT_EQ(chroot("."), 0);
+
+ EXPECT_EQ(umount2(self->root_mntpoint, MNT_DETACH), 0);
+ EXPECT_EQ(chdir(self->root_mntpoint), 0);
+ EXPECT_EQ(chdir("/"), 0);
+ EXPECT_EQ(rmdir(self->root_mntpoint), 0);
+}
+
+static uint64_t expect_notify(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t *mask)
+{
+ struct fanotify_event_metadata *meta;
+ struct fanotify_event_info_mnt *mnt;
+ unsigned int thislen;
+
+ if (!self->rem) {
+ ssize_t len = read(self->fan_fd, self->buf, sizeof(self->buf));
+ ASSERT_GT(len, 0);
+
+ self->rem = len;
+ self->next = (void *) self->buf;
+ }
+
+ meta = self->next;
+ ASSERT_TRUE(FAN_EVENT_OK(meta, self->rem));
+
+ thislen = meta->event_len;
+ self->rem -= thislen;
+ self->next += thislen;
+
+ *mask = meta->mask;
+ thislen -= sizeof(*meta);
+
+ mnt = ((void *) meta) + meta->event_len - thislen;
+
+ ASSERT_EQ(thislen, sizeof(*mnt));
+
+ return mnt->mnt_id;
+}
+
+static void expect_notify_n(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ unsigned int n, uint64_t mask[], uint64_t mnts[])
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ mnts[i] = expect_notify(_metadata, self, &mask[i]);
+}
+
+static uint64_t expect_notify_mask(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t expect_mask)
+{
+ uint64_t mntid, mask;
+
+ mntid = expect_notify(_metadata, self, &mask);
+ ASSERT_EQ(expect_mask, mask);
+
+ return mntid;
+}
+
+
+static void expect_notify_mask_n(struct __test_metadata *const _metadata,
+ FIXTURE_DATA(fanotify) *self,
+ uint64_t mask, unsigned int n, uint64_t mnts[])
+{
+ unsigned int i;
+
+ for (i = 0; i < n; i++)
+ mnts[i] = expect_notify_mask(_metadata, self, mask);
+}
+
+static void verify_mount_ids(struct __test_metadata *const _metadata,
+ const uint64_t list1[], const uint64_t list2[],
+ size_t num)
+{
+ unsigned int i, j;
+
+ // Check that neither list has any duplicates
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < num; j++) {
+ if (i != j) {
+ ASSERT_NE(list1[i], list1[j]);
+ ASSERT_NE(list2[i], list2[j]);
+ }
+ }
+ }
+ // Check that all list1 members can be found in list2. Together with
+ // the above it means that list1 and list2 represent the same sets.
+ for (i = 0; i < num; i++) {
+ for (j = 0; j < num; j++) {
+ if (list1[i] == list2[j])
+ break;
+ }
+ ASSERT_NE(j, num);
+ }
+}
+
+static void check_mounted(struct __test_metadata *const _metadata,
+ const uint64_t mnts[], size_t num)
+{
+ ssize_t ret;
+ uint64_t *list;
+
+ list = malloc((num + 1) * sizeof(list[0]));
+ ASSERT_NE(list, NULL);
+
+ ret = listmount(LSMT_ROOT, 0, 0, list, num + 1, 0);
+ ASSERT_EQ(ret, num);
+
+ verify_mount_ids(_metadata, mnts, list, num);
+
+ free(list);
+}
+
+static void setup_mount_tree(struct __test_metadata *const _metadata,
+ int log2_num)
+{
+ int ret, i;
+
+ ret = mount("", "/", NULL, MS_SHARED, NULL);
+ ASSERT_EQ(ret, 0);
+
+ for (i = 0; i < log2_num; i++) {
+ ret = mount("/", "/", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+ }
+}
+
+TEST_F(fanotify, bind)
+{
+ int ret;
+ uint64_t mnts[2] = { self->root_id };
+
+ ret = mount("/", "/", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ // Cleanup
+ uint64_t detach_id;
+ ret = umount("/");
+ ASSERT_EQ(ret, 0);
+
+ detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH);
+ ASSERT_EQ(detach_id, mnts[1]);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, move)
+{
+ int ret;
+ uint64_t mnts[2] = { self->root_id };
+ uint64_t move_id;
+
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ ret = move_mount(AT_FDCWD, "/a", AT_FDCWD, "/b", 0);
+ ASSERT_EQ(ret, 0);
+
+ move_id = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH);
+ ASSERT_EQ(move_id, mnts[1]);
+
+ // Cleanup
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, propagate)
+{
+ const unsigned int log2_num = 4;
+ const unsigned int num = (1 << log2_num);
+ uint64_t mnts[num];
+
+ setup_mount_tree(_metadata, log2_num);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, num - 1, mnts + 1);
+
+ mnts[0] = self->root_id;
+ check_mounted(_metadata, mnts, num);
+
+ // Cleanup
+ int ret;
+ uint64_t mnts2[num];
+ ret = umount2("/", MNT_DETACH);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/", NULL, MS_PRIVATE, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts2[0] = self->root_id;
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, num - 1, mnts2 + 1);
+ verify_mount_ids(_metadata, mnts, mnts2, num);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, fsmount)
+{
+ int ret, fs, mnt;
+ uint64_t mnts[2] = { self->root_id };
+
+ fs = fsopen("tmpfs", 0);
+ ASSERT_GE(fs, 0);
+
+ ret = fsconfig(fs, FSCONFIG_CMD_CREATE, 0, 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ mnt = fsmount(fs, 0, 0);
+ ASSERT_GE(mnt, 0);
+
+ close(fs);
+
+ ret = move_mount(mnt, "", AT_FDCWD, "/a", MOVE_MOUNT_F_EMPTY_PATH);
+ ASSERT_EQ(ret, 0);
+
+ close(mnt);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ ASSERT_NE(mnts[0], mnts[1]);
+
+ check_mounted(_metadata, mnts, 2);
+
+ // Cleanup
+ uint64_t detach_id;
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ detach_id = expect_notify_mask(_metadata, self, FAN_MNT_DETACH);
+ ASSERT_EQ(detach_id, mnts[1]);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, reparent)
+{
+ uint64_t mnts[6] = { self->root_id };
+ uint64_t dmnts[3];
+ uint64_t masks[3];
+ unsigned int i;
+ int ret;
+
+ // Create setup with a[1] -> b[2] propagation
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/a", NULL, MS_SHARED, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/a", "/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("", "/b", NULL, MS_SLAVE, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1);
+
+ check_mounted(_metadata, mnts, 3);
+
+ // Mount on a[3], which is propagated to b[4]
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 3);
+
+ check_mounted(_metadata, mnts, 5);
+
+ // Mount on b[5], not propagated
+ ret = mount("/", "/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[5] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+
+ check_mounted(_metadata, mnts, 6);
+
+ // Umount a[3], which is propagated to b[4], but not b[5]
+ // This will result in b[5] "falling" on b[2]
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_n(_metadata, self, 3, masks, dmnts);
+ verify_mount_ids(_metadata, mnts + 3, dmnts, 3);
+
+ for (i = 0; i < 3; i++) {
+ if (dmnts[i] == mnts[5]) {
+ ASSERT_EQ(masks[i], FAN_MNT_ATTACH | FAN_MNT_DETACH);
+ } else {
+ ASSERT_EQ(masks[i], FAN_MNT_DETACH);
+ }
+ }
+
+ mnts[3] = mnts[5];
+ check_mounted(_metadata, mnts, 4);
+
+ // Cleanup
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/b");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 3, dmnts);
+ verify_mount_ids(_metadata, mnts + 1, dmnts, 3);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_F(fanotify, rmdir)
+{
+ uint64_t mnts[3] = { self->root_id };
+ int ret;
+
+ ret = mount("/", "/a", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/", "/a/b", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH, 2, mnts + 1);
+
+ check_mounted(_metadata, mnts, 3);
+
+ ret = chdir("/a");
+ ASSERT_EQ(ret, 0);
+
+ ret = fork();
+ ASSERT_GE(ret, 0);
+
+ if (ret == 0) {
+ chdir("/");
+ unshare(CLONE_NEWNS);
+ mount("", "/", NULL, MS_REC|MS_PRIVATE, NULL);
+ umount2("/a", MNT_DETACH);
+ // This triggers a detach in the other namespace
+ rmdir("/a");
+ exit(0);
+ }
+ wait(NULL);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_DETACH, 2, mnts + 1);
+ check_mounted(_metadata, mnts, 1);
+
+ // Cleanup
+ ret = chdir("/");
+ ASSERT_EQ(ret, 0);
+}
+
+TEST_F(fanotify, pivot_root)
+{
+ uint64_t mnts[3] = { self->root_id };
+ uint64_t mnts2[3];
+ int ret;
+
+ ret = mount("tmpfs", "/a", "tmpfs", 0, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[2] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+
+ ret = mkdir("/a/new", 0700);
+ ASSERT_EQ(ret, 0);
+
+ ret = mkdir("/a/old", 0700);
+ ASSERT_EQ(ret, 0);
+
+ ret = mount("/a", "/a/new", NULL, MS_BIND, NULL);
+ ASSERT_EQ(ret, 0);
+
+ mnts[1] = expect_notify_mask(_metadata, self, FAN_MNT_ATTACH);
+ check_mounted(_metadata, mnts, 3);
+
+ ret = syscall(SYS_pivot_root, "/a/new", "/a/new/old");
+ ASSERT_EQ(ret, 0);
+
+ expect_notify_mask_n(_metadata, self, FAN_MNT_ATTACH | FAN_MNT_DETACH, 2, mnts2);
+ verify_mount_ids(_metadata, mnts, mnts2, 2);
+ check_mounted(_metadata, mnts, 3);
+
+ // Cleanup
+ ret = syscall(SYS_pivot_root, "/old", "/old/a/new");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a/new");
+ ASSERT_EQ(ret, 0);
+
+ ret = umount("/a");
+ ASSERT_EQ(ret, 0);
+
+ check_mounted(_metadata, mnts, 1);
+}
+
+TEST_HARNESS_MAIN
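
Stripped of the harness, the fixture boils down to marking a mount namespace
fd with FAN_MARK_MNTNS and reading attach/detach events from the fanotify fd.
A condensed, hedged sketch (requires a kernel with FAN_REPORT_MNT support; the
fallback defines mirror the ones in the test above):

/* Minimal mount-notification watcher; error handling kept terse. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

#ifndef FAN_REPORT_MNT		/* pre-6.15 headers */
#define FAN_REPORT_MNT	0x00004000
#define FAN_MARK_MNTNS	0x00000110
#define FAN_MNT_ATTACH	0x01000000
#define FAN_MNT_DETACH	0x02000000
#endif

int main(void)
{
	char buf[256];
	int fan_fd = fanotify_init(FAN_REPORT_MNT, 0);
	int ns_fd = open("/proc/self/ns/mnt", O_RDONLY);

	if (fan_fd < 0 || ns_fd < 0)
		return 1;
	if (fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MNTNS,
			  FAN_MNT_ATTACH | FAN_MNT_DETACH, ns_fd, NULL))
		return 1;
	if (read(fan_fd, buf, sizeof(buf)) > 0)	/* blocks until a mount event */
		puts("mount tree changed");
	return 0;
}
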
diff --git a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
index 457cf76f3c5f..a3d8015897e9 100644
--- a/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
+++ b/tools/testing/selftests/filesystems/nsfs/iterate_mntns.c
@@ -3,6 +3,8 @@
#define _GNU_SOURCE
#include <fcntl.h>
+#include <linux/auto_dev-ioctl.h>
+#include <linux/errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
@@ -146,4 +148,16 @@ TEST_F(iterate_mount_namespaces, iterate_backward)
}
}
+TEST_F(iterate_mount_namespaces, nfs_valid_ioctl)
+{
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_OPENMOUNT, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_CLOSEMOUNT, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+
+ ASSERT_NE(ioctl(self->fd_mnt_ns[0], AUTOFS_DEV_IOCTL_READY, NULL), 0);
+ ASSERT_EQ(errno, ENOTTY);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/overlayfs/Makefile b/tools/testing/selftests/filesystems/overlayfs/Makefile
index e8d1adb021af..6c661232b3b5 100644
--- a/tools/testing/selftests/filesystems/overlayfs/Makefile
+++ b/tools/testing/selftests/filesystems/overlayfs/Makefile
@@ -1,7 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := dev_in_maps set_layers_via_fds
+CFLAGS += -Wall
+CFLAGS += $(KHDR_INCLUDES)
+LDLIBS += -lcap
-CFLAGS := -Wall -Werror
+LOCAL_HDRS += wrappers.h log.h
+
+TEST_GEN_PROGS := dev_in_maps
+TEST_GEN_PROGS += set_layers_via_fds
include ../../lib.mk
+
+$(OUTPUT)/set_layers_via_fds: ../utils.c
diff --git a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
index 1d0ae785a667..5074e64e74a8 100644
--- a/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
+++ b/tools/testing/selftests/filesystems/overlayfs/set_layers_via_fds.c
@@ -6,26 +6,40 @@
#include <sched.h>
#include <stdio.h>
#include <string.h>
+#include <sys/socket.h>
#include <sys/stat.h>
+#include <sys/sysmacros.h>
#include <sys/mount.h>
#include <unistd.h>
#include "../../kselftest_harness.h"
+#include "../../pidfd/pidfd.h"
#include "log.h"
+#include "../utils.h"
#include "wrappers.h"
FIXTURE(set_layers_via_fds) {
+ int pidfd;
};
FIXTURE_SETUP(set_layers_via_fds)
{
- ASSERT_EQ(mkdir("/set_layers_via_fds", 0755), 0);
+ self->pidfd = -EBADF;
+ EXPECT_EQ(mkdir("/set_layers_via_fds", 0755), 0);
+ EXPECT_EQ(mkdir("/set_layers_via_fds_tmpfs", 0755), 0);
}
FIXTURE_TEARDOWN(set_layers_via_fds)
{
+ if (self->pidfd >= 0) {
+ EXPECT_EQ(sys_pidfd_send_signal(self->pidfd, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(close(self->pidfd), 0);
+ }
umount2("/set_layers_via_fds", 0);
- ASSERT_EQ(rmdir("/set_layers_via_fds"), 0);
+ EXPECT_EQ(rmdir("/set_layers_via_fds"), 0);
+
+ umount2("/set_layers_via_fds_tmpfs", 0);
+ EXPECT_EQ(rmdir("/set_layers_via_fds_tmpfs"), 0);
}
TEST_F(set_layers_via_fds, set_layers_via_fds)
@@ -214,4 +228,493 @@ TEST_F(set_layers_via_fds, set_500_layers_via_fds)
ASSERT_EQ(close(fd_overlay), 0);
}
+TEST_F(set_layers_via_fds, set_override_creds)
+{
+ int fd_context, fd_tmpfs, fd_overlay;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int pidfd;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_STRING, "metacopy", "on", 0), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "nooverride_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_GE(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
+TEST_F(set_layers_via_fds, set_override_creds_invalid)
+{
+ int fd_context, fd_tmpfs, fd_overlay, ret;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int fd_userns1, fd_userns2;
+ int ipc_sockets[2];
+ char c;
+ const unsigned int predictable_fd_context_nr = 123;
+
+ fd_userns1 = get_userns_fd(0, 0, 10000);
+ ASSERT_GE(fd_userns1, 0);
+
+ fd_userns2 = get_userns_fd(0, 1234, 10000);
+ ASSERT_GE(fd_userns2, 0);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ ASSERT_GE(ret, 0);
+
+ pid = create_child(&self->pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (close(ipc_sockets[0])) {
+ TH_LOG("close should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (!switch_userns(fd_userns2, 0, 0, false)) {
+ TH_LOG("switch_userns should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (read_nointr(ipc_sockets[1], &c, 1) != 1) {
+ TH_LOG("read_nointr should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (close(ipc_sockets[1])) {
+ TH_LOG("close should have succeeded");
+ _exit(EXIT_FAILURE);
+ }
+
+ if (!sys_fsconfig(predictable_fd_context_nr, FSCONFIG_SET_FLAG, "override_creds", NULL, 0)) {
+ TH_LOG("sys_fsconfig should have failed");
+ _exit(EXIT_FAILURE);
+ }
+
+ _exit(EXIT_SUCCESS);
+ }
+
+ ASSERT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(switch_userns(fd_userns1, 0, 0, false), true);
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+ ASSERT_EQ(dup3(fd_context, predictable_fd_context_nr, 0), predictable_fd_context_nr);
+ ASSERT_EQ(close(fd_context), 0);
+ fd_context = predictable_fd_context_nr;
+ ASSERT_EQ(write_nointr(ipc_sockets[0], "1", 1), 1);
+ ASSERT_EQ(close(ipc_sockets[0]), 0);
+
+ ASSERT_EQ(wait_for_pid(pid), 0);
+ ASSERT_EQ(close(self->pidfd), 0);
+ self->pidfd = -EBADF;
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++)
+ ASSERT_EQ(close(layer_fds[i]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "userxattr", NULL, 0), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+ ASSERT_EQ(close(fd_userns1), 0);
+ ASSERT_EQ(close(fd_userns2), 0);
+}
+
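+/*
+ * Dropping CAP_MKNOD before setting override_creds means the creds
+ * recorded for the mount lack it, so mknod on the resulting overlay
+ * must fail with EPERM.
+ */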
+TEST_F(set_layers_via_fds, set_override_creds_nomknod)
+{
+ int fd_context, fd_tmpfs, fd_overlay;
+ int layer_fds[] = { [0 ... 3] = -EBADF };
+ pid_t pid;
+ int pidfd;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+
+ layer_fds[0] = openat(fd_tmpfs, "w", O_DIRECTORY);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmpfs, "u", O_DIRECTORY);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = openat(fd_tmpfs, "l1", O_DIRECTORY);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = openat(fd_tmpfs, "l2", O_DIRECTORY);
+ ASSERT_GE(layer_fds[3], 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "userxattr", NULL, 0), 0);
+
+ pid = create_child(&pidfd, 0);
+ ASSERT_GE(pid, 0);
+ if (pid == 0) {
+ if (!cap_down(CAP_MKNOD))
+ _exit(EXIT_FAILURE);
+
+ if (!cap_down(CAP_SYS_ADMIN))
+ _exit(EXIT_FAILURE);
+
+ if (sys_fsconfig(fd_context, FSCONFIG_SET_FLAG, "override_creds", NULL, 0))
+ _exit(EXIT_FAILURE);
+
+ _exit(EXIT_SUCCESS);
+ }
+ ASSERT_EQ(sys_waitid(P_PID, pid, NULL, WEXITED), 0);
+ ASSERT_GE(close(pidfd), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(mknodat(fd_overlay, "dev-zero", S_IFCHR | 0644, makedev(1, 5)), -1);
+ ASSERT_EQ(errno, EPERM);
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
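+/* Overlayfs caps the lower layer stack at 500; the 501st lowerdir+ must fail. */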
+TEST_F(set_layers_via_fds, set_500_layers_via_opath_fds)
+{
+ int fd_context, fd_tmpfs, fd_overlay, fd_work, fd_upper, fd_lower;
+ int layer_fds[500] = { [0 ... 499] = -EBADF };
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ char path[100];
+
+ sprintf(path, "l%d", i);
+ ASSERT_EQ(mkdirat(fd_tmpfs, path, 0755), 0);
+ layer_fds[i] = openat(fd_tmpfs, path, O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[i], 0);
+ }
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "w", 0755), 0);
+ fd_work = openat(fd_tmpfs, "w", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_work, 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ fd_upper = openat(fd_tmpfs, "u", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_upper, 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l501", 0755), 0);
+ fd_lower = openat(fd_tmpfs, "l501", O_DIRECTORY | O_PATH);
+ ASSERT_GE(fd_lower, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/tmp", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, fd_work), 0);
+ ASSERT_EQ(close(fd_work), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, fd_upper), 0);
+ ASSERT_EQ(close(fd_upper), 0);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[i]), 0);
+ ASSERT_EQ(close(layer_fds[i]), 0);
+ }
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, fd_lower), 0);
+ ASSERT_EQ(close(fd_lower), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+}
+
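+/* Layers may also be handed over as detached mounts created via open_tree(OPEN_TREE_CLONE). */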
+TEST_F(set_layers_via_fds, set_layers_via_detached_mount_fds)
+{
+ int fd_context, fd_tmpfs, fd_overlay, fd_tmp;
+ int layer_fds[] = { [0 ... 8] = -EBADF };
+ bool layers_found[] = { [0 ... 8] = false };
+ size_t len = 0;
+ char *line = NULL;
+ FILE *f_mountinfo;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(sys_mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL), 0);
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+ ASSERT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u/upper", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "u/work", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l2", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l3", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "l4", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d1", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d2", 0755), 0);
+ ASSERT_EQ(mkdirat(fd_tmpfs, "d3", 0755), 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tmpfs, "", -EBADF, "/set_layers_via_fds_tmpfs", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ fd_tmp = open_tree(fd_tmpfs, "u", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tmp, 0);
+
+ layer_fds[0] = openat(fd_tmp, "upper", O_CLOEXEC | O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[0], 0);
+
+ layer_fds[1] = openat(fd_tmp, "work", O_CLOEXEC | O_DIRECTORY | O_PATH);
+ ASSERT_GE(layer_fds[1], 0);
+
+ layer_fds[2] = open_tree(fd_tmpfs, "l1", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[2], 0);
+
+ layer_fds[3] = open_tree(fd_tmpfs, "l2", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[3], 0);
+
+ layer_fds[4] = open_tree(fd_tmpfs, "l3", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[4], 0);
+
+ layer_fds[5] = open_tree(fd_tmpfs, "l4", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[5], 0);
+
+ layer_fds[6] = open_tree(fd_tmpfs, "d1", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[6], 0);
+
+ layer_fds[7] = open_tree(fd_tmpfs, "d2", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[7], 0);
+
+ layer_fds[8] = open_tree(fd_tmpfs, "d3", OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(layer_fds[8], 0);
+
+ ASSERT_EQ(close(fd_tmpfs), 0);
+
+ fd_context = sys_fsopen("overlay", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_NE(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir", NULL, layer_fds[2]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "upperdir", NULL, layer_fds[0]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "workdir", NULL, layer_fds[1]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[2]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[3]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[4]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "lowerdir+", NULL, layer_fds[5]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[6]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[7]), 0);
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_FD, "datadir+", NULL, layer_fds[8]), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_SET_STRING, "metacopy", "on", 0), 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_overlay = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_overlay, 0);
+
+ ASSERT_EQ(sys_move_mount(fd_overlay, "", -EBADF, "/set_layers_via_fds", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ f_mountinfo = fopen("/proc/self/mountinfo", "r");
+ ASSERT_NE(f_mountinfo, NULL);
+
+ while (getline(&line, &len, f_mountinfo) != -1) {
+ char *haystack = line;
+
+ if (strstr(haystack, "workdir=/tmp/w"))
+ layers_found[0] = true;
+ if (strstr(haystack, "upperdir=/tmp/u"))
+ layers_found[1] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l1"))
+ layers_found[2] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l2"))
+ layers_found[3] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l3"))
+ layers_found[4] = true;
+ if (strstr(haystack, "lowerdir+=/tmp/l4"))
+ layers_found[5] = true;
+ if (strstr(haystack, "datadir+=/tmp/d1"))
+ layers_found[6] = true;
+ if (strstr(haystack, "datadir+=/tmp/d2"))
+ layers_found[7] = true;
+ if (strstr(haystack, "datadir+=/tmp/d3"))
+ layers_found[8] = true;
+ }
+ free(line);
+
+ for (int i = 0; i < ARRAY_SIZE(layer_fds); i++) {
+ ASSERT_EQ(layers_found[i], true);
+ ASSERT_EQ(close(layer_fds[i]), 0);
+ }
+
+ ASSERT_EQ(close(fd_context), 0);
+ ASSERT_EQ(close(fd_overlay), 0);
+ ASSERT_EQ(fclose(f_mountinfo), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/filesystems/overlayfs/wrappers.h b/tools/testing/selftests/filesystems/overlayfs/wrappers.h
index 071b95fd2ac0..c38bc48e0cfa 100644
--- a/tools/testing/selftests/filesystems/overlayfs/wrappers.h
+++ b/tools/testing/selftests/filesystems/overlayfs/wrappers.h
@@ -44,4 +44,21 @@ static inline int sys_move_mount(int from_dfd, const char *from_pathname,
to_pathname, flags);
}
+#ifndef OPEN_TREE_CLONE
+#define OPEN_TREE_CLONE 1
+#endif
+
+#ifndef OPEN_TREE_CLOEXEC
+#define OPEN_TREE_CLOEXEC O_CLOEXEC
+#endif
+
+#ifndef AT_RECURSIVE
+#define AT_RECURSIVE 0x8000
+#endif
+
+static inline int sys_open_tree(int dfd, const char *filename, unsigned int flags)
+{
+ return syscall(__NR_open_tree, dfd, filename, flags);
+}
+
#endif
diff --git a/tools/testing/selftests/filesystems/statmount/statmount.h b/tools/testing/selftests/filesystems/statmount/statmount.h
index f4294bab9d73..a7a5289ddae9 100644
--- a/tools/testing/selftests/filesystems/statmount/statmount.h
+++ b/tools/testing/selftests/filesystems/statmount/statmount.h
@@ -25,7 +25,7 @@ static inline int statmount(uint64_t mnt_id, uint64_t mnt_ns_id, uint64_t mask,
return syscall(__NR_statmount, &req, buf, bufsize, flags);
}
-static ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
+static inline ssize_t listmount(uint64_t mnt_id, uint64_t mnt_ns_id,
uint64_t last_mnt_id, uint64_t list[], size_t num,
unsigned int flags)
{
diff --git a/tools/testing/selftests/filesystems/utils.c b/tools/testing/selftests/filesystems/utils.c
new file mode 100644
index 000000000000..e553c89c5b19
--- /dev/null
+++ b/tools/testing/selftests/filesystems/utils.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <fcntl.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <grp.h>
+#include <linux/limits.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/eventfd.h>
+#include <sys/fsuid.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+
+#include "utils.h"
+
+#define MAX_USERNS_LEVEL 32
+
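+/*
+ * syserror() prints the current errno (via %m) plus a message and
+ * evaluates to -errno; syserror_set() first seeds errno from a negative
+ * return code so the %m expansion matches the failure being reported.
+ */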
+#define syserror(format, ...) \
+ ({ \
+ fprintf(stderr, "%m - " format "\n", ##__VA_ARGS__); \
+ (-errno); \
+ })
+
+#define syserror_set(__ret__, format, ...) \
+ ({ \
+ typeof(__ret__) __internal_ret__ = (__ret__); \
+ errno = labs(__internal_ret__); \
+ fprintf(stderr, "%m - " format "\n", ##__VA_ARGS__); \
+ __internal_ret__; \
+ })
+
+#define STRLITERALLEN(x) (sizeof(""x"") - 1)
+
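+/*
+ * Worst-case decimal string length for an integer type: the maximum
+ * number of digits for the type's size (1 byte -> 3, 2 -> 5, 4 -> 10,
+ * 8 -> 20) plus two bytes of headroom for a sign. Types wider than
+ * 8 bytes force a negative array size and break the build.
+ */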
+#define INTTYPE_TO_STRLEN(type) \
+ (2 + (sizeof(type) <= 1 \
+ ? 3 \
+ : sizeof(type) <= 2 \
+ ? 5 \
+ : sizeof(type) <= 4 \
+ ? 10 \
+ : sizeof(type) <= 8 ? 20 : sizeof(int[-2 * (sizeof(type) > 8)])))
+
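+/* Walk a circular list with a sentinel head; the head itself carries no element. */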
+#define list_for_each(__iterator, __list) \
+ for (__iterator = (__list)->next; __iterator != __list; __iterator = __iterator->next)
+
+typedef enum idmap_type_t {
+ ID_TYPE_UID,
+ ID_TYPE_GID
+} idmap_type_t;
+
+struct id_map {
+ idmap_type_t map_type;
+ __u32 nsid;
+ __u32 hostid;
+ __u32 range;
+};
+
+struct list {
+ void *elem;
+ struct list *next;
+ struct list *prev;
+};
+
+struct userns_hierarchy {
+ int fd_userns;
+ int fd_event;
+ unsigned int level;
+ struct list id_map;
+};
+
+static inline void list_init(struct list *list)
+{
+ list->elem = NULL;
+ list->next = list->prev = list;
+}
+
+static inline int list_empty(const struct list *list)
+{
+ return list == list->next;
+}
+
+static inline void __list_add(struct list *new, struct list *prev, struct list *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+
+static inline void list_add_tail(struct list *head, struct list *list)
+{
+ __list_add(list, head->prev, head);
+}
+
+static inline void list_del(struct list *list)
+{
+ struct list *next, *prev;
+
+ next = list->next;
+ prev = list->prev;
+ next->prev = prev;
+ prev->next = next;
+}
+
+static ssize_t read_nointr(int fd, void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = read(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ do {
+ ret = write(fd, buf, count);
+ } while (ret < 0 && errno == EINTR);
+
+ return ret;
+}
+
+#define __STACK_SIZE (8 * 1024 * 1024)
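+/*
+ * Clone a child running fn(arg) on a freshly allocated stack. Stacks
+ * grow downwards on all supported architectures, so pass the top of
+ * the allocation; ia64 is the exception and takes the stack base via
+ * __clone2().
+ */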
+static pid_t do_clone(int (*fn)(void *), void *arg, int flags)
+{
+ void *stack;
+
+ stack = malloc(__STACK_SIZE);
+ if (!stack)
+ return -ENOMEM;
+
+#ifdef __ia64__
+ return __clone2(fn, stack, __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#else
+ return clone(fn, stack + __STACK_SIZE, flags | SIGCHLD, arg, NULL);
+#endif
+}
+
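+/* The child just parks itself; the parent only needs its namespaces. */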
+static int get_userns_fd_cb(void *data)
+{
+ for (;;)
+ pause();
+ _exit(0);
+}
+
+static int wait_for_pid(pid_t pid)
+{
+ int status, ret;
+
+again:
+ ret = waitpid(pid, &status, 0);
+ if (ret == -1) {
+ if (errno == EINTR)
+ goto again;
+
+ return -1;
+ }
+
+ if (!WIFEXITED(status))
+ return -1;
+
+ return WEXITSTATUS(status);
+}
+
+static int write_id_mapping(idmap_type_t map_type, pid_t pid, const char *buf, size_t buf_size)
+{
+ int fd = -EBADF, setgroups_fd = -EBADF;
+ int fret = -1;
+ int ret;
+ char path[STRLITERALLEN("/proc/") + INTTYPE_TO_STRLEN(pid_t) +
+ STRLITERALLEN("/setgroups") + 1];
+
+ if (geteuid() != 0 && map_type == ID_TYPE_GID) {
+ ret = snprintf(path, sizeof(path), "/proc/%d/setgroups", pid);
+ if (ret < 0 || ret >= sizeof(path))
+ goto out;
+
+ setgroups_fd = open(path, O_WRONLY | O_CLOEXEC);
+ if (setgroups_fd < 0 && errno != ENOENT) {
+ syserror("Failed to open \"%s\"", path);
+ goto out;
+ }
+
+ if (setgroups_fd >= 0) {
+ ret = write_nointr(setgroups_fd, "deny\n", STRLITERALLEN("deny\n"));
+ if (ret != STRLITERALLEN("deny\n")) {
+ syserror("Failed to write \"deny\" to \"/proc/%d/setgroups\"", pid);
+ goto out;
+ }
+ }
+ }
+
+ ret = snprintf(path, sizeof(path), "/proc/%d/%cid_map", pid, map_type == ID_TYPE_UID ? 'u' : 'g');
+ if (ret < 0 || ret >= sizeof(path))
+ goto out;
+
+ fd = open(path, O_WRONLY | O_CLOEXEC);
+ if (fd < 0) {
+ syserror("Failed to open \"%s\"", path);
+ goto out;
+ }
+
+ ret = write_nointr(fd, buf, buf_size);
+ if (ret != buf_size) {
+ syserror("Failed to write %cid mapping to \"%s\"",
+ map_type == ID_TYPE_UID ? 'u' : 'g', path);
+ goto out;
+ }
+
+ fret = 0;
+out:
+ close(fd);
+ close(setgroups_fd);
+
+ return fret;
+}
+
+static int map_ids_from_idmap(struct list *idmap, pid_t pid)
+{
+ int fill, left;
+ char mapbuf[4096] = {};
+ bool had_entry = false;
+ idmap_type_t map_type;
+ char u_or_g;
+
+ if (list_empty(idmap))
+ return 0;
+
+ for (map_type = ID_TYPE_UID, u_or_g = 'u';
+ map_type <= ID_TYPE_GID; map_type++, u_or_g = 'g') {
+ char *pos = mapbuf;
+ int ret;
+ struct list *iterator;
+
+ list_for_each(iterator, idmap) {
+ struct id_map *map = iterator->elem;
+ if (map->map_type != map_type)
+ continue;
+
+ had_entry = true;
+
+ left = 4096 - (pos - mapbuf);
+ fill = snprintf(pos, left, "%u %u %u\n", map->nsid, map->hostid, map->range);
+ /*
+ * The kernel only takes <= 4k for writes to
+ * /proc/<pid>/{g,u}id_map
+ */
+ if (fill <= 0 || fill >= left)
+ return syserror_set(-E2BIG, "Too many %cid mappings defined", u_or_g);
+
+ pos += fill;
+ }
+ if (!had_entry)
+ continue;
+
+ ret = write_id_mapping(map_type, pid, mapbuf, pos - mapbuf);
+ if (ret < 0)
+ return syserror("Failed to write mapping: %s", mapbuf);
+
+ memset(mapbuf, 0, sizeof(mapbuf));
+ }
+
+ return 0;
+}
+
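+/*
+ * Fork a dummy child in fresh user and mount namespaces, write the
+ * requested id mappings for it, then grab an fd to its user namespace
+ * before reaping it: the open fd keeps the namespace alive.
+ */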
+static int get_userns_fd_from_idmap(struct list *idmap)
+{
+ int ret;
+ pid_t pid;
+ char path_ns[STRLITERALLEN("/proc/") + INTTYPE_TO_STRLEN(pid_t) +
+ STRLITERALLEN("/ns/user") + 1];
+
+ pid = do_clone(get_userns_fd_cb, NULL, CLONE_NEWUSER | CLONE_NEWNS);
+ if (pid < 0)
+ return -errno;
+
+ ret = map_ids_from_idmap(idmap, pid);
+ if (ret < 0)
+ return ret;
+
+ ret = snprintf(path_ns, sizeof(path_ns), "/proc/%d/ns/user", pid);
+ if (ret < 0 || (size_t)ret >= sizeof(path_ns))
+ ret = -EIO;
+ else
+ ret = open(path_ns, O_RDONLY | O_CLOEXEC | O_NOCTTY);
+
+ (void)kill(pid, SIGKILL);
+ (void)wait_for_pid(pid);
+ return ret;
+}
+
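+/*
+ * get_userns_fd - return an fd to a user namespace that maps ids
+ * [nsid, nsid + range) in the namespace to [hostid, hostid + range)
+ * on the host, for both uids and gids.
+ */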
+int get_userns_fd(unsigned long nsid, unsigned long hostid, unsigned long range)
+{
+ struct list head, uid_mapl, gid_mapl;
+ struct id_map uid_map = {
+ .map_type = ID_TYPE_UID,
+ .nsid = nsid,
+ .hostid = hostid,
+ .range = range,
+ };
+ struct id_map gid_map = {
+ .map_type = ID_TYPE_GID,
+ .nsid = nsid,
+ .hostid = hostid,
+ .range = range,
+ };
+
+ list_init(&head);
+ uid_mapl.elem = &uid_map;
+ gid_mapl.elem = &gid_map;
+ list_add_tail(&head, &uid_mapl);
+ list_add_tail(&head, &gid_mapl);
+
+ return get_userns_fd_from_idmap(&head);
+}
+
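+/*
+ * switch_ids - drop supplementary groups, switch to the given uid/gid,
+ * and re-mark the process dumpable so its /proc/<pid> files stay
+ * accessible. Returns true on success, false on any failure.
+ */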
+bool switch_ids(uid_t uid, gid_t gid)
+{
+ if (setgroups(0, NULL)) {
+ syserror("failure: setgroups");
+ return false;
+ }
+
+ if (setresgid(gid, gid, gid)) {
+ syserror("failure: setresgid");
+ return false;
+ }
+
+ if (setresuid(uid, uid, uid)) {
+ syserror("failure: setresuid");
+ return false;
+ }
+
+ /* Ensure we can access proc files from processes we can ptrace. */
+ if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0)) {
+ syserror("failure: make dumpable");
+ return false;
+ }
+
+ return true;
+}
+
+static int create_userns_hierarchy(struct userns_hierarchy *h);
+
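+/*
+ * Runs once per nesting level: wait for the parent to report whether it
+ * wrote an id mapping, become root in the new userns if it did, ack over
+ * the socketpair, then recurse one level deeper.
+ */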
+static int userns_fd_cb(void *data)
+{
+ struct userns_hierarchy *h = data;
+ char c;
+ int ret;
+
+ ret = read_nointr(h->fd_event, &c, 1);
+ if (ret < 0)
+ return syserror("failure: read from socketpair");
+
+ /* Only switch ids if someone actually wrote a mapping for us. */
+ if (c == '1') {
+ if (!switch_ids(0, 0))
+ return syserror("failure: switch ids to 0");
+ }
+
+ ret = write_nointr(h->fd_event, "1", 1);
+ if (ret < 0)
+ return syserror("failure: write to socketpair");
+
+ ret = create_userns_hierarchy(++h);
+ if (ret < 0)
+ return syserror("failure: userns level %d", h->level);
+
+ return 0;
+}
+
+static int create_userns_hierarchy(struct userns_hierarchy *h)
+{
+ int fret = -1;
+ char c;
+ int fd_socket[2];
+ int fd_userns = -EBADF, ret = -1;
+ ssize_t bytes;
+ pid_t pid;
+ char path[256];
+
+ if (h->level == MAX_USERNS_LEVEL)
+ return 0;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, fd_socket);
+ if (ret < 0)
+ return syserror("failure: create socketpair");
+
+ /* Note the CLONE_FILES | CLONE_VM when mucking with fds and memory. */
+ h->fd_event = fd_socket[1];
+ pid = do_clone(userns_fd_cb, h, CLONE_NEWUSER | CLONE_FILES | CLONE_VM);
+ if (pid < 0) {
+ syserror("failure: userns level %d", h->level);
+ goto out_close;
+ }
+
+ ret = map_ids_from_idmap(&h->id_map, pid);
+ if (ret < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: writing id mapping for userns level %d for %d", h->level, pid);
+ goto out_wait;
+ }
+
+ if (!list_empty(&h->id_map))
+ bytes = write_nointr(fd_socket[0], "1", 1); /* Inform the child we wrote a mapping. */
+ else
+ bytes = write_nointr(fd_socket[0], "0", 1); /* Inform the child we didn't write a mapping. */
+ if (bytes < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: write to socketpair");
+ goto out_wait;
+ }
+
+ /* Wait for child to set*id() and become dumpable. */
+ bytes = read_nointr(fd_socket[0], &c, 1);
+ if (bytes < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: read from socketpair");
+ goto out_wait;
+ }
+
+ snprintf(path, sizeof(path), "/proc/%d/ns/user", pid);
+ fd_userns = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd_userns < 0) {
+ kill(pid, SIGKILL);
+ syserror("failure: open userns level %d for %d", h->level, pid);
+ goto out_wait;
+ }
+
+ fret = 0;
+
+out_wait:
+ if (!wait_for_pid(pid) && !fret) {
+ h->fd_userns = fd_userns;
+ fd_userns = -EBADF;
+ }
+
+out_close:
+ if (fd_userns >= 0)
+ close(fd_userns);
+ close(fd_socket[0]);
+ close(fd_socket[1]);
+ return fret;
+}
+
+/* caps_down - lower all effective caps */
+int caps_down(void)
+{
+ bool fret = false;
+ cap_t caps = NULL;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps)
+ goto out;
+
+ ret = cap_clear_flag(caps, CAP_EFFECTIVE);
+ if (ret)
+ goto out;
+
+ ret = cap_set_proc(caps);
+ if (ret)
+ goto out;
+
+ fret = true;
+
+out:
+ cap_free(caps);
+ return fret;
+}
+
+/* cap_down - lower an effective cap */
+int cap_down(cap_value_t down)
+{
+ bool fret = false;
+ cap_t caps = NULL;
+ cap_value_t cap = down;
+ int ret = -1;
+
+ caps = cap_get_proc();
+ if (!caps)
+ goto out;
+
+ ret = cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap, 0);
+ if (ret)
+ goto out;
+
+ ret = cap_set_proc(caps);
+ if (ret)
+ goto out;
+
+ fret = true;
+
+out:
+ cap_free(caps);
+ return fret;
+}
diff --git a/tools/testing/selftests/filesystems/utils.h b/tools/testing/selftests/filesystems/utils.h
new file mode 100644
index 000000000000..7f1df2a3e94c
--- /dev/null
+++ b/tools/testing/selftests/filesystems/utils.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __IDMAP_UTILS_H
+#define __IDMAP_UTILS_H
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <linux/types.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/capability.h>
+#include <sys/fsuid.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+extern int get_userns_fd(unsigned long nsid, unsigned long hostid,
+ unsigned long range);
+
+extern int caps_down(void);
+extern int cap_down(cap_value_t down);
+
+extern bool switch_ids(uid_t uid, gid_t gid);
+
+static inline bool switch_userns(int fd, uid_t uid, gid_t gid, bool drop_caps)
+{
+ if (setns(fd, CLONE_NEWUSER))
+ return false;
+
+ if (!switch_ids(uid, gid))
+ return false;
+
+ if (drop_caps && !caps_down())
+ return false;
+
+ return true;
+}
+
+#endif /* __IDMAP_UTILS_H */
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index cdf91b0ca40f..c3b6d2604b1e 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -444,10 +444,6 @@ static inline __noreturn __printf(1, 2) void ksft_exit_skip(const char *msg, ...
static inline int ksft_min_kernel_version(unsigned int min_major,
unsigned int min_minor)
{
-#ifdef NOLIBC
- ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
- return 0;
-#else
unsigned int major, minor;
struct utsname info;
@@ -455,7 +451,6 @@ static inline int ksft_min_kernel_version(unsigned int min_major,
ksft_exit_fail_msg("Can't parse kernel version\n");
return major > min_major || (major == min_major && minor >= min_minor);
-#endif
}
#endif /* __KSELFTEST_H */
diff --git a/tools/testing/selftests/kselftest/module.sh b/tools/testing/selftests/kselftest/module.sh
index fb4733faff12..51fb65159932 100755
--- a/tools/testing/selftests/kselftest/module.sh
+++ b/tools/testing/selftests/kselftest/module.sh
@@ -11,7 +11,7 @@
# SPDX-License-Identifier: GPL-2.0+
# $(dirname $0)/../kselftest/module.sh "description" module_name
#
-# Example: tools/testing/selftests/lib/printf.sh
+# Example: tools/testing/selftests/lib/bitmap.sh
desc="" # Output prefix.
module="" # Filename (without the .ko).
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 4277b983cace..f773f8f99249 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -69,6 +69,7 @@ TEST_GEN_PROGS_x86 += x86/hyperv_tlb_flush
TEST_GEN_PROGS_x86 += x86/kvm_clock_test
TEST_GEN_PROGS_x86 += x86/kvm_pv_test
TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
+TEST_GEN_PROGS_x86 += x86/nested_emulation_test
TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
TEST_GEN_PROGS_x86 += x86/platform_info_test
TEST_GEN_PROGS_x86 += x86/pmu_counters_test
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 3c7defd34f56..447e619cf856 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -239,7 +239,7 @@ static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
case ITERATION_MARK_IDLE:
mark_vcpu_memory_idle(vm, vcpu_args);
break;
- };
+ }
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
}
diff --git a/tools/testing/selftests/kvm/arm64/get-reg-list.c b/tools/testing/selftests/kvm/arm64/get-reg-list.c
index d43fb3f49050..d01798b6b3b4 100644
--- a/tools/testing/selftests/kvm/arm64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/arm64/get-reg-list.c
@@ -332,6 +332,7 @@ static __u64 base_regs[] = {
KVM_REG_ARM_FW_FEAT_BMAP_REG(0), /* KVM_REG_ARM_STD_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(1), /* KVM_REG_ARM_STD_HYP_BMAP */
KVM_REG_ARM_FW_FEAT_BMAP_REG(2), /* KVM_REG_ARM_VENDOR_HYP_BMAP */
+ KVM_REG_ARM_FW_FEAT_BMAP_REG(3), /* KVM_REG_ARM_VENDOR_HYP_BMAP_2 */
ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/arm64/hypercalls.c b/tools/testing/selftests/kvm/arm64/hypercalls.c
index ec54ec7726e9..44cfcf8a7f46 100644
--- a/tools/testing/selftests/kvm/arm64/hypercalls.c
+++ b/tools/testing/selftests/kvm/arm64/hypercalls.c
@@ -21,22 +21,31 @@
#define KVM_REG_ARM_STD_BMAP_BIT_MAX 0
#define KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX 0
#define KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX 1
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_MAX 1
+
+#define KVM_REG_ARM_STD_BMAP_RESET_VAL FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_BMAP_BIT_MAX)
+#define KVM_REG_ARM_STD_HYP_BMAP_RESET_VAL FW_REG_ULIMIT_VAL(KVM_REG_ARM_STD_HYP_BMAP_BIT_MAX)
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_RESET_VAL FW_REG_ULIMIT_VAL(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_MAX)
+#define KVM_REG_ARM_VENDOR_HYP_BMAP_2_RESET_VAL 0
struct kvm_fw_reg_info {
uint64_t reg; /* Register definition */
uint64_t max_feat_bit; /* Bit that represents the upper limit of the feature-map */
+ uint64_t reset_val; /* Reset value for the register */
};
#define FW_REG_INFO(r) \
{ \
.reg = r, \
.max_feat_bit = r##_BIT_MAX, \
+ .reset_val = r##_RESET_VAL \
}
static const struct kvm_fw_reg_info fw_reg_info[] = {
FW_REG_INFO(KVM_REG_ARM_STD_BMAP),
FW_REG_INFO(KVM_REG_ARM_STD_HYP_BMAP),
FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP),
+ FW_REG_INFO(KVM_REG_ARM_VENDOR_HYP_BMAP_2),
};
enum test_stage {
@@ -171,22 +180,39 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(fw_reg_info); i++) {
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
+ uint64_t set_val;
- /* First 'read' should be an upper limit of the features supported */
+ /* First 'read' should be the reset value for the reg */
val = vcpu_get_reg(vcpu, reg_info->reg);
- TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
- "Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
- reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
+ TEST_ASSERT(val == reg_info->reset_val,
+ "Unexpected reset value for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
+ reg_info->reg, reg_info->reset_val, val);
+
+ if (reg_info->reset_val)
+ set_val = 0;
+ else
+ set_val = FW_REG_ULIMIT_VAL(reg_info->max_feat_bit);
- /* Test a 'write' by disabling all the features of the register map */
- ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, set_val);
TEST_ASSERT(ret == 0,
- "Failed to clear all the features of reg: 0x%lx; ret: %d",
- reg_info->reg, errno);
+ "Failed to %s all the features of reg: 0x%lx; ret: %d",
+ (set_val ? "set" : "clear"), reg_info->reg, errno);
val = vcpu_get_reg(vcpu, reg_info->reg);
- TEST_ASSERT(val == 0,
- "Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
+ TEST_ASSERT(val == set_val,
+ "Expected all the features to be %s for reg: 0x%lx",
+ (set_val ? "set" : "cleared"), reg_info->reg);
+
+ /*
+ * If the reg has been set, clear it as test_fw_regs_after_vm_start()
+ * expects it to be cleared.
+ */
+ if (set_val) {
+ ret = __vcpu_set_reg(vcpu, reg_info->reg, 0);
+ TEST_ASSERT(ret == 0,
+ "Failed to clear all the features of reg: 0x%lx; ret: %d",
+ reg_info->reg, errno);
+ }
/*
* Test enabling a feature that's not supported.
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c
index 217541fe6536..322b9d3b0125 100644
--- a/tools/testing/selftests/kvm/arm64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c
@@ -146,6 +146,9 @@ static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
+ REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN4_2, 1),
+ REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN64_2, 1),
+ REG_FTR_BITS(FTR_EXACT, ID_AA64MMFR0_EL1, TGRAN16_2, 1),
S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
@@ -230,6 +233,9 @@ static void guest_code(void)
GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
GUEST_REG_SYNC(SYS_CTR_EL0);
+ GUEST_REG_SYNC(SYS_MIDR_EL1);
+ GUEST_REG_SYNC(SYS_REVIDR_EL1);
+ GUEST_REG_SYNC(SYS_AIDR_EL1);
GUEST_DONE();
}
@@ -609,18 +615,31 @@ static void test_ctr(struct kvm_vcpu *vcpu)
test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}
-static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
+static void test_id_reg(struct kvm_vcpu *vcpu, u32 id)
{
u64 val;
+ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(id));
+ val++;
+ vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(id), val);
+ test_reg_vals[encoding_to_range_idx(id)] = val;
+}
+
+static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
+{
test_clidr(vcpu);
test_ctr(vcpu);
- val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
- val++;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
+ test_id_reg(vcpu, SYS_MPIDR_EL1);
+ ksft_test_result_pass("%s\n", __func__);
+}
+
+static void test_vcpu_non_ftr_id_regs(struct kvm_vcpu *vcpu)
+{
+ test_id_reg(vcpu, SYS_MIDR_EL1);
+ test_id_reg(vcpu, SYS_REVIDR_EL1);
+ test_id_reg(vcpu, SYS_AIDR_EL1);
- test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
ksft_test_result_pass("%s\n", __func__);
}
@@ -647,6 +666,9 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
+ test_assert_id_reg_unchanged(vcpu, SYS_MIDR_EL1);
+ test_assert_id_reg_unchanged(vcpu, SYS_REVIDR_EL1);
+ test_assert_id_reg_unchanged(vcpu, SYS_AIDR_EL1);
ksft_test_result_pass("%s\n", __func__);
}
@@ -660,8 +682,11 @@ int main(void)
int test_cnt;
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS));
- vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vm = vm_create(1);
+ vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0);
+ vcpu = vm_vcpu_add(vm, 0, guest_code);
/* Check for AARCH64 only system */
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
@@ -675,13 +700,14 @@ int main(void)
ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
- ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
+ ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 3 +
MPAM_IDREG_TEST;
ksft_set_plan(test_cnt);
test_vm_ftr_id_regs(vcpu, aarch64_only);
test_vcpu_ftr_id_regs(vcpu);
+ test_vcpu_non_ftr_id_regs(vcpu);
test_user_set_mpam_reg(vcpu);
test_guest_reg_read(vcpu);
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index aacf80f57439..23593d9eeba9 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -31,15 +31,18 @@
/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM 0xc0000000
-/* How many pages to dirty for each guest loop */
-#define TEST_PAGES_PER_LOOP 1024
-
/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N 32UL
/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL 10UL
+/*
+ * Ensure the vCPU is able to perform a reasonable number of writes in each
+ * iteration to provide a lower bound on coverage.
+ */
+#define TEST_MIN_WRITES_PER_ITERATION 0x100
+
/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
@@ -75,6 +78,8 @@ static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t iteration;
+static uint64_t nr_writes;
+static bool vcpu_stop;
/*
* Guest physical memory offset of the testing memory slot.
@@ -96,7 +101,9 @@ static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
static void guest_code(void)
{
uint64_t addr;
- int i;
+
+#ifdef __s390x__
+ uint64_t i;
/*
* On s390x, all pages of a 1M segment are initially marked as dirty
@@ -107,16 +114,19 @@ static void guest_code(void)
for (i = 0; i < guest_num_pages; i++) {
addr = guest_test_virt_mem + i * guest_page_size;
vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+ nr_writes++;
}
+#endif
while (true) {
- for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
+ while (!READ_ONCE(vcpu_stop)) {
addr = guest_test_virt_mem;
addr += (guest_random_u64(&guest_rng) % guest_num_pages)
* guest_page_size;
addr = align_down(addr, host_page_size);
vcpu_arch_put_guest(*(uint64_t *)addr, READ_ONCE(iteration));
+ nr_writes++;
}
GUEST_SYNC(1);
@@ -133,25 +143,18 @@ static uint64_t host_num_pages;
/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
-static uint64_t host_track_next_count;
/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
-/*
- * This is only set by main thread, and only cleared by vcpu thread. It is
- * used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC
- * is the only place that we'll guarantee both "dirty bit" and "dirty data"
- * will match. E.g., SIG_IPI won't guarantee that if the vcpu is interrupted
- * after setting dirty bit but before the data is written.
- */
-static atomic_t vcpu_sync_stop_requested;
+
/*
* This is updated by the vcpu thread to tell the host whether it's a
* ring-full event. It should only be read until a sem_wait() of
* sem_vcpu_stop and before vcpu continues to run.
*/
static bool dirty_ring_vcpu_ring_full;
+
/*
* This is only used for verifying the dirty pages. Dirty ring has a very
* tricky case when the ring just got full, kvm will do userspace exit due to
@@ -166,7 +169,51 @@ static bool dirty_ring_vcpu_ring_full;
* dirty gfn we've collected, so that if a mismatch of data found later in the
* verifying process, we let it pass.
*/
-static uint64_t dirty_ring_last_page;
+static uint64_t dirty_ring_last_page = -1ULL;
+
+/*
+ * In addition to the above, it is possible (especially if this
+ * test is run nested) for the above scenario to repeat multiple times:
+ *
+ * The following can happen:
+ *
+ * - L1 vCPU: Memory write is logged to PML but not committed.
+ *
+ * - L1 test thread: Ignores the write because it's the last dirty ring entry.
+ * Resets the dirty ring which:
+ * - Resets the A/D bits in EPT
+ * - Issues tlb flush (invept), which is intercepted by L0
+ *
+ * - L0: frees the whole nested EPT MMU root in response to the invept,
+ * and thus ensures that when the memory write is retried, it will fault again
+ *
+ * - L1 vCPU: Same memory write is logged to the PML but not committed again.
+ *
+ * - L1 test thread: Ignores the write because it's the last dirty ring entry (again).
+ * Resets the dirty ring which:
+ * - Resets the A/D bits in EPT (again)
+ * - Issues tlb flush (again) which is intercepted by L0
+ *
+ * ...
+ *
+ * N times
+ *
+ * - L1 vCPU: Memory write is logged in the PML and then committed.
+ * Lots of other memory writes are logged and committed.
+ * ...
+ *
+ * - L1 test thread: Sees the memory write along with other memory writes
+ * in the dirty ring, and since the write is usually not
+ * the last entry in the dirty-ring and has a very outdated
+ * iteration, the test fails.
+ *
+ *
+ * Note that this is only possible when the write was the last log entry
+ * written during iteration N-1. Thus, remember the last log entry of the
+ * previous iteration, and don't fail when it is reported in the next
+ * iteration together with an outdated iteration count.
+ */
+static uint64_t dirty_ring_prev_iteration_last_page;
enum log_mode_t {
/* Only use KVM_GET_DIRTY_LOG for logging */
@@ -191,24 +238,6 @@ static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;
-static void vcpu_kick(void)
-{
- pthread_kill(vcpu_thread, SIG_IPI);
-}
-
-/*
- * In our test we do signal tricks, let's use a better version of
- * sem_wait to avoid signal interrupts
- */
-static void sem_wait_until(sem_t *sem)
-{
- int ret;
-
- do
- ret = sem_wait(sem);
- while (ret == -1 && errno == EINTR);
-}
-
static bool clear_log_supported(void)
{
return kvm_has_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
@@ -243,21 +272,16 @@ static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
- if (atomic_read(&vcpu_sync_stop_requested)) {
- /* It means main thread is sleeping waiting */
- atomic_set(&vcpu_sync_stop_requested, false);
+ if (READ_ONCE(vcpu_stop)) {
sem_post(&sem_vcpu_stop);
- sem_wait_until(&sem_vcpu_cont);
+ sem_wait(&sem_vcpu_cont);
}
}
-static void default_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void default_after_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
- "vcpu run failed: errno=%d", err);
-
TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
"Invalid guest sync status: exit_reason=%s",
exit_reason_str(run->exit_reason));
@@ -324,7 +348,6 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
"%u != %u", cur->slot, slot);
TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
"0x%llx >= 0x%x", cur->offset, num_pages);
- //pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
__set_bit_le(cur->offset, bitmap);
dirty_ring_last_page = cur->offset;
dirty_gfn_set_collected(cur);
@@ -335,36 +358,11 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
return count;
}
-static void dirty_ring_wait_vcpu(void)
-{
- /* This makes sure that hardware PML cache flushed */
- vcpu_kick();
- sem_wait_until(&sem_vcpu_stop);
-}
-
-static void dirty_ring_continue_vcpu(void)
-{
- pr_info("Notifying vcpu to continue\n");
- sem_post(&sem_vcpu_cont);
-}
-
static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx)
{
- uint32_t count = 0, cleared;
- bool continued_vcpu = false;
-
- dirty_ring_wait_vcpu();
-
- if (!dirty_ring_vcpu_ring_full) {
- /*
- * This is not a ring-full event, it's safe to allow
- * vcpu to continue
- */
- dirty_ring_continue_vcpu();
- continued_vcpu = true;
- }
+ uint32_t count, cleared;
/* Only have one vcpu */
count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu),
@@ -379,35 +377,18 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
*/
TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
"with collected (%u)", cleared, count);
-
- if (!continued_vcpu) {
- TEST_ASSERT(dirty_ring_vcpu_ring_full,
- "Didn't continue vcpu even without ring full");
- dirty_ring_continue_vcpu();
- }
-
- pr_info("Iteration %ld collected %u pages\n", iteration, count);
}
-static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/* A ucall-sync or ring-full event is allowed */
if (get_ucall(vcpu, NULL) == UCALL_SYNC) {
- /* We should allow this to continue */
- ;
- } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
- (ret == -1 && err == EINTR)) {
- /* Update the flag first before pause */
- WRITE_ONCE(dirty_ring_vcpu_ring_full,
- run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
- sem_post(&sem_vcpu_stop);
- pr_info("vcpu stops because %s...\n",
- dirty_ring_vcpu_ring_full ?
- "dirty ring is full" : "vcpu is kicked out");
- sem_wait_until(&sem_vcpu_cont);
- pr_info("vcpu continues now.\n");
+ vcpu_handle_sync_stop();
+ } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL) {
+ WRITE_ONCE(dirty_ring_vcpu_ring_full, true);
+ vcpu_handle_sync_stop();
} else {
TEST_ASSERT(false, "Invalid guest sync status: "
"exit_reason=%s",
@@ -426,7 +407,7 @@ struct log_mode {
void *bitmap, uint32_t num_pages,
uint32_t *ring_buf_idx);
/* Hook to call when after each vcpu run */
- void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err);
+ void (*after_vcpu_run)(struct kvm_vcpu *vcpu);
} log_modes[LOG_MODE_NUM] = {
{
.name = "dirty-log",
@@ -449,15 +430,6 @@ struct log_mode {
},
};
-/*
- * We use this bitmap to track some pages that should have its dirty
- * bit set in the _next_ iteration. For example, if we detected the
- * page value changed to current iteration but at the same time the
- * page bit is cleared in the latest bitmap, then the system must
- * report that write in the next get dirty log call.
- */
-static unsigned long *host_bmap_track;
-
static void log_modes_dump(void)
{
int i;
@@ -497,170 +469,109 @@ static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot,
mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx);
}
-static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err)
+static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu)
{
struct log_mode *mode = &log_modes[host_log_mode];
if (mode->after_vcpu_run)
- mode->after_vcpu_run(vcpu, ret, err);
+ mode->after_vcpu_run(vcpu);
}
static void *vcpu_worker(void *data)
{
- int ret;
struct kvm_vcpu *vcpu = data;
- uint64_t pages_count = 0;
- struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
- + sizeof(sigset_t));
- sigset_t *sigset = (sigset_t *) &sigmask->sigset;
- /*
- * SIG_IPI is unblocked atomically while in KVM_RUN. It causes the
- * ioctl to return with -EINTR, but it is still pending and we need
- * to accept it with the sigwait.
- */
- sigmask->len = 8;
- pthread_sigmask(0, NULL, sigset);
- sigdelset(sigset, SIG_IPI);
- vcpu_ioctl(vcpu, KVM_SET_SIGNAL_MASK, sigmask);
-
- sigemptyset(sigset);
- sigaddset(sigset, SIG_IPI);
+ sem_wait(&sem_vcpu_cont);
while (!READ_ONCE(host_quit)) {
- /* Clear any existing kick signals */
- pages_count += TEST_PAGES_PER_LOOP;
/* Let the guest dirty the random pages */
- ret = __vcpu_run(vcpu);
- if (ret == -1 && errno == EINTR) {
- int sig = -1;
- sigwait(sigset, &sig);
- assert(sig == SIG_IPI);
- }
- log_mode_after_vcpu_run(vcpu, ret, errno);
+ vcpu_run(vcpu);
+ log_mode_after_vcpu_run(vcpu);
}
- pr_info("Dirtied %"PRIu64" pages\n", pages_count);
-
return NULL;
}
-static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
+static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long **bmap)
{
+ uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;
uint64_t step = vm_num_host_pages(mode, 1);
- uint64_t page;
- uint64_t *value_ptr;
- uint64_t min_iter = 0;
for (page = 0; page < host_num_pages; page += step) {
- value_ptr = host_test_mem + page * host_page_size;
-
- /* If this is a special page that we were tracking... */
- if (__test_and_clear_bit_le(page, host_bmap_track)) {
- host_track_next_count++;
- TEST_ASSERT(test_bit_le(page, bmap),
- "Page %"PRIu64" should have its dirty bit "
- "set in this iteration but it is missing",
- page);
- }
+ uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size);
+ bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]);
- if (__test_and_clear_bit_le(page, bmap)) {
- bool matched;
-
- host_dirty_count++;
+ /*
+ * Ensure both bitmaps are cleared, as a page can be written
+ * multiple times per iteration, i.e. can show up in both
+ * bitmaps, and the dirty ring is additive, i.e. doesn't purge
+ * bitmap entries from previous collections.
+ */
+ if (__test_and_clear_bit_le(page, bmap[1]) || bmap0_dirty) {
+ nr_dirty_pages++;
/*
- * If the bit is set, the value written onto
- * the corresponding page should be either the
- * previous iteration number or the current one.
+ * If the page is dirty, the value written to memory
+ * should be the current iteration number.
*/
- matched = (*value_ptr == iteration ||
- *value_ptr == iteration - 1);
-
- if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
- if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
- /*
- * Short answer: this case is special
- * only for dirty ring test where the
- * page is the last page before a kvm
- * dirty ring full in iteration N-2.
- *
- * Long answer: Assuming ring size R,
- * one possible condition is:
- *
- * main thr vcpu thr
- * -------- --------
- * iter=1
- * write 1 to page 0~(R-1)
- * full, vmexit
- * collect 0~(R-1)
- * kick vcpu
- * write 1 to (R-1)~(2R-2)
- * full, vmexit
- * iter=2
- * collect (R-1)~(2R-2)
- * kick vcpu
- * write 1 to (2R-2)
- * (NOTE!!! "1" cached in cpu reg)
- * write 2 to (2R-1)~(3R-3)
- * full, vmexit
- * iter=3
- * collect (2R-2)~(3R-3)
- * (here if we read value on page
- * "2R-2" is 1, while iter=3!!!)
- *
- * This however can only happen once per iteration.
- */
- min_iter = iteration - 1;
+ if (val == iteration)
+ continue;
+
+ if (host_log_mode == LOG_MODE_DIRTY_RING) {
+ /*
+ * The last page in the ring from previous
+ * iteration can be written with the value
+ * from the previous iteration, as the value to
+ * be written may be cached in a CPU register.
+ */
+ if (page == dirty_ring_prev_iteration_last_page &&
+ val == iteration - 1)
continue;
- } else if (page == dirty_ring_last_page) {
- /*
- * Please refer to comments in
- * dirty_ring_last_page.
- */
+
+ /*
+ * Any value from a previous iteration is legal
+ * for the last entry, as the write may not yet
+ * have retired, i.e. the page may hold whatever
+ * it had before this iteration started.
+ */
+ if (page == dirty_ring_last_page &&
+ val < iteration)
continue;
- }
+ } else if (!val && iteration == 1 && bmap0_dirty) {
+ /*
+ * When testing get+clear, the dirty bitmap
+ * starts with all bits set, and so the first
+ * iteration can observe a "dirty" page that
+ * was never written, but only in the first
+ * bitmap (collecting the bitmap also clears
+ * all dirty pages).
+ */
+ continue;
}
- TEST_ASSERT(matched,
- "Set page %"PRIu64" value %"PRIu64
- " incorrect (iteration=%"PRIu64")",
- page, *value_ptr, iteration);
+ TEST_FAIL("Dirty page %lu value (%lu) != iteration (%lu) "
+ "(last = %lu, prev_last = %lu)",
+ page, val, iteration, dirty_ring_last_page,
+ dirty_ring_prev_iteration_last_page);
} else {
- host_clear_count++;
+ nr_clean_pages++;
/*
* If cleared, the value written can be any
- * value smaller or equals to the iteration
- * number. Note that the value can be exactly
- * (iteration-1) if that write can happen
- * like this:
- *
- * (1) increase loop count to "iteration-1"
- * (2) write to page P happens (with value
- * "iteration-1")
- * (3) get dirty log for "iteration-1"; we'll
- * see that page P bit is set (dirtied),
- * and not set the bit in host_bmap_track
- * (4) increase loop count to "iteration"
- * (which is current iteration)
- * (5) get dirty log for current iteration,
- * we'll see that page P is cleared, with
- * value "iteration-1".
+ * value smaller than the iteration number.
*/
- TEST_ASSERT(*value_ptr <= iteration,
- "Clear page %"PRIu64" value %"PRIu64
- " incorrect (iteration=%"PRIu64")",
- page, *value_ptr, iteration);
- if (*value_ptr == iteration) {
- /*
- * This page is _just_ modified; it
- * should report its dirtyness in the
- * next run
- */
- __set_bit_le(page, host_bmap_track);
- }
+ TEST_ASSERT(val < iteration,
+ "Clear page %lu value (%lu) >= iteration (%lu) "
+ "(last = %lu, prev_last = %lu)",
+ page, val, iteration, dirty_ring_last_page,
+ dirty_ring_prev_iteration_last_page);
}
}
+
+ pr_info("Iteration %2ld: dirty: %-6lu clean: %-6lu writes: %-6lu\n",
+ iteration, nr_dirty_pages, nr_clean_pages, nr_writes);
+
+ host_dirty_count += nr_dirty_pages;
+ host_clear_count += nr_clean_pages;
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, struct kvm_vcpu **vcpu,
@@ -688,7 +599,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
struct test_params *p = arg;
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
- unsigned long *bmap;
+ unsigned long *bmap[2];
uint32_t ring_buf_idx = 0;
int sem_val;
@@ -731,12 +642,21 @@ static void run_test(enum vm_guest_mode mode, void *arg)
#ifdef __s390x__
/* Align to 1M (segment size) */
guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
+
+ /*
+ * The workaround in guest_code() to write all pages prior to the first
+ * iteration isn't compatible with the dirty ring, as the dirty ring
+ * support relies on the vCPU to actually stop when vcpu_stop is set so
+ * that the vCPU doesn't hang waiting for the dirty ring to be emptied.
+ */
+ TEST_ASSERT(host_log_mode != LOG_MODE_DIRTY_RING,
+ "Test needs to be updated to support s390 dirty ring");
#endif
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
- bmap = bitmap_zalloc(host_num_pages);
- host_bmap_track = bitmap_zalloc(host_num_pages);
+ bmap[0] = bitmap_zalloc(host_num_pages);
+ bmap[1] = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@@ -757,14 +677,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
sync_global_to_guest(vm, guest_test_virt_mem);
sync_global_to_guest(vm, guest_num_pages);
- /* Start the iterations */
- iteration = 1;
- sync_global_to_guest(vm, iteration);
- WRITE_ONCE(host_quit, false);
host_dirty_count = 0;
host_clear_count = 0;
- host_track_next_count = 0;
- WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+ WRITE_ONCE(host_quit, false);
/*
* Ensure the previous iteration didn't leave a dangling semaphore, i.e.
@@ -776,21 +691,95 @@ static void run_test(enum vm_guest_mode mode, void *arg)
sem_getvalue(&sem_vcpu_cont, &sem_val);
TEST_ASSERT_EQ(sem_val, 0);
+ TEST_ASSERT_EQ(vcpu_stop, false);
+
pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
- while (iteration < p->iterations) {
- /* Give the vcpu thread some time to dirty some pages */
- usleep(p->interval * 1000);
- log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
- bmap, host_num_pages,
- &ring_buf_idx);
+ for (iteration = 1; iteration <= p->iterations; iteration++) {
+ unsigned long i;
+
+ sync_global_to_guest(vm, iteration);
+
+ WRITE_ONCE(nr_writes, 0);
+ sync_global_to_guest(vm, nr_writes);
+
+ dirty_ring_prev_iteration_last_page = dirty_ring_last_page;
+ WRITE_ONCE(dirty_ring_vcpu_ring_full, false);
+
+ sem_post(&sem_vcpu_cont);
+
+ /*
+ * Let the vCPU run beyond the configured interval until it has
+ * performed the minimum number of writes. This verifies the
+ * guest is making forward progress, e.g. isn't stuck because
+ * of a KVM bug, and puts a firm floor on test coverage.
+ */
+ for (i = 0; i < p->interval || nr_writes < TEST_MIN_WRITES_PER_ITERATION; i++) {
+ /*
+ * Sleep in 1ms chunks to keep the interval math simple
+ * and so that the test doesn't run too far beyond the
+ * specified interval.
+ */
+ usleep(1000);
+
+ sync_global_from_guest(vm, nr_writes);
+
+ /*
+ * Reap dirty pages while the guest is running so that
+ * dirty ring full events are resolved, i.e. so that a
+ * larger interval doesn't always end up with a vCPU
+ * that's effectively blocked. Collecting while the
+ * guest is running also verifies KVM doesn't lose any
+ * state.
+ *
+ * For bitmap modes, KVM overwrites the entire bitmap,
+ * i.e. collecting the bitmaps is destructive. Collect
+ * the bitmap only on the first pass, otherwise this
+ * test would lose track of dirty pages.
+ */
+ if (i && host_log_mode != LOG_MODE_DIRTY_RING)
+ continue;
+
+ /*
+ * For the dirty ring, empty the ring on subsequent
+ * passes only if the ring was filled at least once,
+ * to verify KVM's handling of a full ring (emptying
+ * the ring on every pass would make it unlikely the
+ * vCPU would ever fill the ring).
+ */
+ if (i && !READ_ONCE(dirty_ring_vcpu_ring_full))
+ continue;
+
+ log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+ bmap[0], host_num_pages,
+ &ring_buf_idx);
+ }
+
+ /*
+ * Stop the vCPU prior to collecting and verifying the dirty
+ * log. If the vCPU is allowed to run during collection, then
+ * pages that are written during this iteration may be missed,
+ * i.e. collected in the next iteration. And if the vCPU is
+ * writing memory during verification, pages that this thread
+ * sees as clean may be written with this iteration's value.
+ */
+ WRITE_ONCE(vcpu_stop, true);
+ sync_global_to_guest(vm, vcpu_stop);
+ sem_wait(&sem_vcpu_stop);
/*
- * See vcpu_sync_stop_requested definition for details on why
- * we need to stop vcpu when verify data.
+ * Clear vcpu_stop after the vCPU thread has acknowledged the
+ * stop request and is waiting, i.e. is definitely not running!
*/
- atomic_set(&vcpu_sync_stop_requested, true);
- sem_wait_until(&sem_vcpu_stop);
+ WRITE_ONCE(vcpu_stop, false);
+ sync_global_to_guest(vm, vcpu_stop);
+
+ /*
+ * Sync the number of writes performed before verification, the
+ * info will be printed along with the dirty/clean page counts.
+ */
+ sync_global_from_guest(vm, nr_writes);
+
/*
* NOTE: for dirty ring, it's possible that we didn't stop at
* GUEST_SYNC but instead we stopped because ring is full;
@@ -798,32 +787,22 @@ static void run_test(enum vm_guest_mode mode, void *arg)
* the flush of the last page, and since we handle the last
* page specially verification will succeed anyway.
*/
- assert(host_log_mode == LOG_MODE_DIRTY_RING ||
- atomic_read(&vcpu_sync_stop_requested) == false);
+ log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX,
+ bmap[1], host_num_pages,
+ &ring_buf_idx);
vm_dirty_log_verify(mode, bmap);
-
- /*
- * Set host_quit before sem_vcpu_cont in the final iteration to
- * ensure that the vCPU worker doesn't resume the guest. As
- * above, the dirty ring test may stop and wait even when not
- * explicitly request to do so, i.e. would hang waiting for a
- * "continue" if it's allowed to resume the guest.
- */
- if (++iteration == p->iterations)
- WRITE_ONCE(host_quit, true);
-
- sem_post(&sem_vcpu_cont);
- sync_global_to_guest(vm, iteration);
}
+ WRITE_ONCE(host_quit, true);
+ sem_post(&sem_vcpu_cont);
+
pthread_join(vcpu_thread, NULL);
- pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
- "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
- host_track_next_count);
+ pr_info("Total bits checked: dirty (%lu), clear (%lu)\n",
+ host_dirty_count, host_clear_count);
- free(bmap);
- free(host_bmap_track);
+ free(bmap[0]);
+ free(bmap[1]);
kvm_vm_free(vm);
}
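For context, the vCPU worker's side of this stop/continue handshake isn't
visible in this hunk. A loose sketch of the assumed flow (names taken from
this file, logic simplified; the actual worker also handles GUEST_SYNC and
dirty-ring-full exits):

	while (!READ_ONCE(host_quit)) {
		/* ... enter the guest, handle ucalls and ring-full exits ... */
		if (READ_ONCE(vcpu_stop)) {
			sem_post(&sem_vcpu_stop);	/* ack: vCPU is parked */
			sem_wait(&sem_vcpu_cont);	/* resume on host's go-ahead */
		}
	}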
@@ -857,7 +836,6 @@ int main(int argc, char *argv[])
.interval = TEST_HOST_LOOP_INTERVAL,
};
int opt, i;
- sigset_t sigset;
sem_init(&sem_vcpu_stop, 0, 0);
sem_init(&sem_vcpu_cont, 0, 0);
@@ -908,19 +886,12 @@ int main(int argc, char *argv[])
}
}
- TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
+ TEST_ASSERT(p.iterations > 0, "Iterations must be greater than zero");
TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
p.iterations, p.interval);
- srandom(time(0));
-
- /* Ensure that vCPU threads start with SIG_IPI blocked. */
- sigemptyset(&sigset);
- sigaddset(&sigset, SIG_IPI);
- pthread_sigmask(SIG_BLOCK, &sigset, NULL);
-
if (host_log_mode_option == LOG_MODE_ALL) {
/* Run each log mode */
for (i = 0; i < LOG_MODE_NUM; i++) {
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 4c4e5a847f67..373912464fb4 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -46,6 +46,12 @@ struct userspace_mem_region {
struct hlist_node slot_node;
};
+struct kvm_binary_stats {
+ int fd;
+ struct kvm_stats_header header;
+ struct kvm_stats_desc *desc;
+};
+
struct kvm_vcpu {
struct list_head list;
uint32_t id;
@@ -55,6 +61,7 @@ struct kvm_vcpu {
#ifdef __x86_64__
struct kvm_cpuid2 *cpuid;
#endif
+ struct kvm_binary_stats stats;
struct kvm_dirty_gfn *dirty_gfns;
uint32_t fetch_index;
uint32_t dirty_gfns_count;
@@ -99,10 +106,7 @@ struct kvm_vm {
struct kvm_vm_arch arch;
- /* Cache of information for binary stats interface */
- int stats_fd;
- struct kvm_stats_header stats_header;
- struct kvm_stats_desc *stats_desc;
+ struct kvm_binary_stats stats;
/*
* KVM region slots. These are the default memslots used by page
@@ -531,16 +535,19 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
struct kvm_stats_desc *desc, uint64_t *data,
size_t max_elements);
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
- size_t max_elements);
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+ uint64_t *data, size_t max_elements);
-static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
-{
- uint64_t data;
+#define __get_stat(stats, stat) \
+({ \
+ uint64_t data; \
+ \
+ kvm_get_stat(stats, #stat, &data, 1); \
+ data; \
+})
- __vm_get_stat(vm, stat_name, &data, 1);
- return data;
-}
+#define vm_get_stat(vm, stat) __get_stat(&(vm)->stats, stat)
+#define vcpu_get_stat(vcpu, stat) __get_stat(&(vcpu)->stats, stat)
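With this change, callers pass the stat as a bare token rather than a string;
for example (stat names taken from tests later in this patch):

	uint64_t splits = vm_get_stat(vm, nx_lpage_splits);
	uint64_t halts  = vcpu_get_stat(vcpu, halt_exits);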
void vm_create_irqchip(struct kvm_vm *vm);
@@ -963,6 +970,8 @@ static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
+void kvm_set_files_rlimit(uint32_t nr_vcpus);
+
void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 3e473058849f..77d13d7920cb 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -22,7 +22,7 @@
#define msecs_to_usecs(msec) ((msec) * 1000ULL)
-static inline int _no_printf(const char *format, ...) { return 0; }
+static inline __printf(1, 2) int _no_printf(const char *format, ...) { return 0; }
#ifdef DEBUG
#define pr_debug(...) printf(__VA_ARGS__)
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index d60da8966772..32ab6ca7ec32 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -183,6 +183,9 @@ struct kvm_x86_cpu_feature {
* Extended Leafs, a.k.a. AMD defined
*/
#define X86_FEATURE_SVM KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 2)
+#define X86_FEATURE_PERFCTR_CORE KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 23)
+#define X86_FEATURE_PERFCTR_NB KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 24)
+#define X86_FEATURE_PERFCTR_LLC KVM_X86_CPU_FEATURE(0x80000001, 0, ECX, 28)
#define X86_FEATURE_NX KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 20)
#define X86_FEATURE_GBPAGES KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 26)
#define X86_FEATURE_RDTSCP KVM_X86_CPU_FEATURE(0x80000001, 0, EDX, 27)
@@ -197,8 +200,11 @@ struct kvm_x86_cpu_feature {
#define X86_FEATURE_PAUSEFILTER KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 10)
#define X86_FEATURE_PFTHRESHOLD KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 12)
#define X86_FEATURE_VGIF KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 16)
+#define X86_FEATURE_IDLE_HLT KVM_X86_CPU_FEATURE(0x8000000A, 0, EDX, 30)
#define X86_FEATURE_SEV KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 1)
#define X86_FEATURE_SEV_ES KVM_X86_CPU_FEATURE(0x8000001F, 0, EAX, 3)
+#define X86_FEATURE_PERFMON_V2 KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 0)
+#define X86_FEATURE_LBR_PMC_FREEZE KVM_X86_CPU_FEATURE(0x80000022, 0, EAX, 2)
/*
* KVM defined paravirt features.
@@ -285,6 +291,8 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23)
#define X86_PROPERTY_SEV_C_BIT KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
#define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
+#define X86_PROPERTY_NR_PERFCTR_CORE KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 0, 3)
+#define X86_PROPERTY_NR_PERFCTR_NB KVM_X86_CPU_PROPERTY(0x80000022, 0, EBX, 10, 15)
#define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -1244,7 +1252,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
uint64_t ign_error_code; \
uint8_t vector; \
\
- asm volatile(KVM_ASM_SAFE(insn) \
+ asm volatile(KVM_ASM_SAFE_FEP(insn) \
: KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code) \
: inputs \
: KVM_ASM_SAFE_CLOBBERS); \
@@ -1339,6 +1347,46 @@ static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
GUEST_ASSERT(!ret);
}
+/*
+ * Execute HLT in an STI interrupt shadow to ensure that a pending IRQ that's
+ * intended to be a wake event arrives *after* HLT is executed. Modern CPUs,
+ * except for a few oddballs that KVM is unlikely to run on, block IRQs for one
+ * instruction after STI, *if* RFLAGS.IF=0 before STI. Note, Intel CPUs may
+ * block other events beyond regular IRQs, e.g. may block NMIs and SMIs too.
+ */
+static inline void safe_halt(void)
+{
+ asm volatile("sti; hlt");
+}
+
+/*
+ * Enable interrupts and ensure that interrupts are evaluated upon return from
+ * this function, i.e. execute a nop to consume the STI interrupt shadow.
+ */
+static inline void sti_nop(void)
+{
+ asm volatile ("sti; nop");
+}
+
+/*
+ * Enable interrupts for one instruction (nop), to allow the CPU to process all
+ * interrupts that are already pending.
+ */
+static inline void sti_nop_cli(void)
+{
+ asm volatile ("sti; nop; cli");
+}
+
+static inline void sti(void)
+{
+ asm volatile("sti");
+}
+
+static inline void cli(void)
+{
+ asm volatile ("cli");
+}
+
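A minimal usage sketch, mirroring the hyperv_ipi.c conversion below: a guest
that waits for IPIs without racing against their delivery.

	/* Sketch: halt safely between wake events, re-masking after each. */
	for (;;) {
		safe_halt();	/* STI;HLT - the IRQ can only arrive after HLT */
		cli();		/* re-mask before re-arming the wait */
	}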
void __vm_xsave_require_permission(uint64_t xfeature, const char *name);
#define vm_xsave_require_permission(xfeature) \
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index c78f34699f73..c5310736ed06 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -10,7 +10,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/resource.h>
#include "test_util.h"
@@ -39,36 +38,11 @@ int main(int argc, char *argv[])
{
int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
- /*
- * Number of file descriptors reqired, KVM_CAP_MAX_VCPUS for vCPU fds +
- * an arbitrary number for everything else.
- */
- int nr_fds_wanted = kvm_max_vcpus + 100;
- struct rlimit rl;
pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
- /*
- * Check that we're allowed to open nr_fds_wanted file descriptors and
- * try raising the limits if needed.
- */
- TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
-
- if (rl.rlim_cur < nr_fds_wanted) {
- rl.rlim_cur = nr_fds_wanted;
- if (rl.rlim_max < nr_fds_wanted) {
- int old_rlim_max = rl.rlim_max;
- rl.rlim_max = nr_fds_wanted;
-
- int r = setrlimit(RLIMIT_NOFILE, &rl);
- __TEST_REQUIRE(r >= 0,
- "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
- old_rlim_max, nr_fds_wanted);
- } else {
- TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
- }
- }
+ kvm_set_files_rlimit(kvm_max_vcpus);
/*
* Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 33fefeb3ca44..279ad8946040 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -12,6 +12,7 @@
#include <assert.h>
#include <sched.h>
#include <sys/mman.h>
+#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -196,6 +197,11 @@ static void vm_open(struct kvm_vm *vm)
vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
+
+ if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
+ vm->stats.fd = vm_get_stats_fd(vm);
+ else
+ vm->stats.fd = -1;
}
const char *vm_guest_mode_string(uint32_t i)
@@ -406,6 +412,38 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
return vm_adjust_num_guest_pages(mode, nr_pages);
}
+void kvm_set_files_rlimit(uint32_t nr_vcpus)
+{
+ /*
+ * Each vCPU will open two file descriptors: the vCPU itself and the
+ * vCPU's binary stats file descriptor. Add an arbitrary amount of
+ * buffer for all other files a test may open.
+ */
+ int nr_fds_wanted = nr_vcpus * 2 + 100;
+ struct rlimit rl;
+
+ /*
+ * Check that we're allowed to open nr_fds_wanted file descriptors and
+ * try raising the limits if needed.
+ */
+ TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+ if (rl.rlim_cur < nr_fds_wanted) {
+ rl.rlim_cur = nr_fds_wanted;
+ if (rl.rlim_max < nr_fds_wanted) {
+ int old_rlim_max = rl.rlim_max;
+
+ rl.rlim_max = nr_fds_wanted;
+ __TEST_REQUIRE(setrlimit(RLIMIT_NOFILE, &rl) >= 0,
+ "RLIMIT_NOFILE hard limit is too low (%d, wanted %d)",
+ old_rlim_max, nr_fds_wanted);
+ } else {
+ TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+ }
+ }
+}
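Callers invoke the helper up front, before any vCPU file descriptors are
created, e.g. (as in the kvm_create_max_vcpus.c hunk above):

	kvm_set_files_rlimit(kvm_check_cap(KVM_CAP_MAX_VCPUS));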
+
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
uint64_t nr_extra_pages)
{
@@ -415,6 +453,8 @@ struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
struct kvm_vm *vm;
int i;
+ kvm_set_files_rlimit(nr_runnable_vcpus);
+
pr_debug("%s: mode='%s' type='%d', pages='%ld'\n", __func__,
vm_guest_mode_string(shape.mode), shape.type, nr_pages);
@@ -657,6 +697,23 @@ userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
return NULL;
}
+static void kvm_stats_release(struct kvm_binary_stats *stats)
+{
+ int ret;
+
+ if (stats->fd < 0)
+ return;
+
+ if (stats->desc) {
+ free(stats->desc);
+ stats->desc = NULL;
+ }
+
+ ret = close(stats->fd);
+ TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+ stats->fd = -1;
+}
+
__weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
@@ -690,6 +747,8 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
ret = close(vcpu->fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+ kvm_stats_release(&vcpu->stats);
+
list_del(&vcpu->list);
vcpu_arch_free(vcpu);
@@ -709,6 +768,9 @@ void kvm_vm_release(struct kvm_vm *vmp)
ret = close(vmp->kvm_fd);
TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
+
+ /* Free cached stats metadata and close FD */
+ kvm_stats_release(&vmp->stats);
}
static void __vm_mem_region_delete(struct kvm_vm *vm,
@@ -748,12 +810,6 @@ void kvm_vm_free(struct kvm_vm *vmp)
if (vmp == NULL)
return;
- /* Free cached stats metadata and close FD */
- if (vmp->stats_fd) {
- free(vmp->stats_desc);
- close(vmp->stats_fd);
- }
-
/* Free userspace_mem_regions. */
hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
__vm_mem_region_delete(vmp, region);
@@ -1286,6 +1342,11 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
TEST_ASSERT(vcpu->run != MAP_FAILED,
__KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
+ if (kvm_has_cap(KVM_CAP_BINARY_STATS_FD))
+ vcpu->stats.fd = vcpu_get_stats_fd(vcpu);
+ else
+ vcpu->stats.fd = -1;
+
/* Add to linked-list of VCPUs. */
list_add(&vcpu->list, &vm->vcpus);
@@ -2198,46 +2259,31 @@ void read_stat_data(int stats_fd, struct kvm_stats_header *header,
desc->name, size, ret);
}
-/*
- * Read the data of the named stat
- *
- * Input Args:
- * vm - the VM for which the stat should be read
- * stat_name - the name of the stat to read
- * max_elements - the maximum number of 8-byte values to read into data
- *
- * Output Args:
- * data - the buffer into which stat data should be read
- *
- * Read the data values of a specified stat from the binary stats interface.
- */
-void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
- size_t max_elements)
+void kvm_get_stat(struct kvm_binary_stats *stats, const char *name,
+ uint64_t *data, size_t max_elements)
{
struct kvm_stats_desc *desc;
size_t size_desc;
int i;
- if (!vm->stats_fd) {
- vm->stats_fd = vm_get_stats_fd(vm);
- read_stats_header(vm->stats_fd, &vm->stats_header);
- vm->stats_desc = read_stats_descriptors(vm->stats_fd,
- &vm->stats_header);
+ if (!stats->desc) {
+ read_stats_header(stats->fd, &stats->header);
+ stats->desc = read_stats_descriptors(stats->fd, &stats->header);
}
- size_desc = get_stats_descriptor_size(&vm->stats_header);
+ size_desc = get_stats_descriptor_size(&stats->header);
- for (i = 0; i < vm->stats_header.num_desc; ++i) {
- desc = (void *)vm->stats_desc + (i * size_desc);
+ for (i = 0; i < stats->header.num_desc; ++i) {
+ desc = (void *)stats->desc + (i * size_desc);
- if (strcmp(desc->name, stat_name))
+ if (strcmp(desc->name, name))
continue;
- read_stat_data(vm->stats_fd, &vm->stats_header, desc,
- data, max_elements);
-
- break;
+ read_stat_data(stats->fd, &stats->header, desc, data, max_elements);
+ return;
}
+
+ TEST_FAIL("Unable to find stat '%s'", name);
}
__weak void kvm_arch_vm_post_create(struct kvm_vm *vm)
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 7c9de8414462..5bde176cedd5 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -114,7 +114,7 @@ struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay,
PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n",
is_minor ? "MINOR" : "MISSING",
- is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY");
+ is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY");
uffd_desc = malloc(sizeof(struct uffd_desc));
TEST_ASSERT(uffd_desc, "Failed to malloc uffd descriptor");
diff --git a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
index f45c0ecc902d..03406de4989d 100644
--- a/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
+++ b/tools/testing/selftests/kvm/riscv/sbi_pmu_test.c
@@ -39,7 +39,13 @@ static bool illegal_handler_invoked;
#define SBI_PMU_TEST_SNAPSHOT BIT(2)
#define SBI_PMU_TEST_OVERFLOW BIT(3)
-static int disabled_tests;
+#define SBI_PMU_OVERFLOW_IRQNUM_DEFAULT 5
+struct test_args {
+ int disabled_tests;
+ int overflow_irqnum;
+};
+
+static struct test_args targs;
unsigned long pmu_csr_read_num(int csr_num)
{
@@ -118,8 +124,8 @@ static void stop_counter(unsigned long counter, unsigned long stop_flags)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter, 1, stop_flags,
0, 0, 0);
- __GUEST_ASSERT(ret.error == 0, "Unable to stop counter %ld error %ld\n",
- counter, ret.error);
+ __GUEST_ASSERT(ret.error == 0 || ret.error == SBI_ERR_ALREADY_STOPPED,
+ "Unable to stop counter %ld error %ld\n", counter, ret.error);
}
static void guest_illegal_exception_handler(struct ex_regs *regs)
@@ -137,7 +143,6 @@ static void guest_irq_handler(struct ex_regs *regs)
unsigned int irq_num = regs->cause & ~CAUSE_IRQ_FLAG;
struct riscv_pmu_snapshot_data *snapshot_data = snapshot_gva;
unsigned long overflown_mask;
- unsigned long counter_val = 0;
/* Validate that we are in the correct irq handler */
GUEST_ASSERT_EQ(irq_num, IRQ_PMU_OVF);
@@ -151,10 +156,6 @@ static void guest_irq_handler(struct ex_regs *regs)
GUEST_ASSERT(overflown_mask & 0x01);
WRITE_ONCE(vcpu_shared_irq_count, vcpu_shared_irq_count+1);
-
- counter_val = READ_ONCE(snapshot_data->ctr_values[0]);
- /* Now start the counter to mimick the real driver behavior */
- start_counter(counter_in_use, SBI_PMU_START_FLAG_SET_INIT_VALUE, counter_val);
}
static unsigned long get_counter_index(unsigned long cbase, unsigned long cmask,
@@ -479,7 +480,7 @@ static void test_pmu_events_snaphost(void)
static void test_pmu_events_overflow(void)
{
- int num_counters = 0;
+ int num_counters = 0, i = 0;
/* Verify presence of SBI PMU and minimum required SBI version */
verify_sbi_requirement_assert();
@@ -496,11 +497,15 @@ static void test_pmu_events_overflow(void)
* Qemu supports overflow for cycle/instruction.
* This test may fail on any platform that does not support overflow for these two events.
*/
- test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
- GUEST_ASSERT_EQ(vcpu_shared_irq_count, 1);
+ for (i = 0; i < targs.overflow_irqnum; i++)
+ test_pmu_event_overflow(SBI_PMU_HW_CPU_CYCLES);
+ GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);
+
+ vcpu_shared_irq_count = 0;
- test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
- GUEST_ASSERT_EQ(vcpu_shared_irq_count, 2);
+ for (i = 0; i < targs.overflow_irqnum; i++)
+ test_pmu_event_overflow(SBI_PMU_HW_INSTRUCTIONS);
+ GUEST_ASSERT_EQ(vcpu_shared_irq_count, targs.overflow_irqnum);
GUEST_DONE();
}
@@ -609,7 +614,11 @@ static void test_vm_events_overflow(void *guest_code)
vcpu_init_vector_tables(vcpu);
/* Initialize guest timer frequency. */
timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));
+
+ /* Export the shared variables to the guest */
sync_global_to_guest(vm, timer_freq);
+ sync_global_to_guest(vm, vcpu_shared_irq_count);
+ sync_global_to_guest(vm, targs);
run_vcpu(vcpu);
@@ -618,28 +627,38 @@ static void test_vm_events_overflow(void *guest_code)
static void test_print_help(char *name)
{
- pr_info("Usage: %s [-h] [-d <test name>]\n", name);
- pr_info("\t-d: Test to disable. Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
+ pr_info("Usage: %s [-h] [-t <test name>] [-n <number of LCOFI interrupt for overflow test>]\n",
+ name);
+ pr_info("\t-t: Test to run (default all). Available tests are 'basic', 'events', 'snapshot', 'overflow'\n");
+ pr_info("\t-n: Number of LCOFI interrupt to trigger for each event in overflow test (default: %d)\n",
+ SBI_PMU_OVERFLOW_IRQNUM_DEFAULT);
pr_info("\t-h: print this help screen\n");
}
static bool parse_args(int argc, char *argv[])
{
int opt;
+ int temp_disabled_tests = SBI_PMU_TEST_BASIC | SBI_PMU_TEST_EVENTS | SBI_PMU_TEST_SNAPSHOT |
+ SBI_PMU_TEST_OVERFLOW;
+ int overflow_interrupts = 0;
- while ((opt = getopt(argc, argv, "hd:")) != -1) {
+ while ((opt = getopt(argc, argv, "ht:n:")) != -1) {
switch (opt) {
- case 'd':
+ case 't':
if (!strncmp("basic", optarg, 5))
- disabled_tests |= SBI_PMU_TEST_BASIC;
+ temp_disabled_tests &= ~SBI_PMU_TEST_BASIC;
else if (!strncmp("events", optarg, 6))
- disabled_tests |= SBI_PMU_TEST_EVENTS;
+ temp_disabled_tests &= ~SBI_PMU_TEST_EVENTS;
else if (!strncmp("snapshot", optarg, 8))
- disabled_tests |= SBI_PMU_TEST_SNAPSHOT;
+ temp_disabled_tests &= ~SBI_PMU_TEST_SNAPSHOT;
else if (!strncmp("overflow", optarg, 8))
- disabled_tests |= SBI_PMU_TEST_OVERFLOW;
+ temp_disabled_tests &= ~SBI_PMU_TEST_OVERFLOW;
else
goto done;
+ targs.disabled_tests = temp_disabled_tests;
+ break;
+ case 'n':
+ overflow_interrupts = atoi_positive("Number of LCOFI", optarg);
break;
case 'h':
default:
@@ -647,6 +666,15 @@ static bool parse_args(int argc, char *argv[])
}
}
+ if (overflow_interrupts > 0) {
+ if (targs.disabled_tests & SBI_PMU_TEST_OVERFLOW) {
+ pr_info("-n option is only available for overflow test\n");
+ goto done;
+ } else {
+ targs.overflow_irqnum = overflow_interrupts;
+ }
+ }
+
return true;
done:
test_print_help(argv[0]);
@@ -655,25 +683,28 @@ done:
int main(int argc, char *argv[])
{
+ targs.disabled_tests = 0;
+ targs.overflow_irqnum = SBI_PMU_OVERFLOW_IRQNUM_DEFAULT;
+
if (!parse_args(argc, argv))
exit(KSFT_SKIP);
- if (!(disabled_tests & SBI_PMU_TEST_BASIC)) {
+ if (!(targs.disabled_tests & SBI_PMU_TEST_BASIC)) {
test_vm_basic_test(test_pmu_basic_sanity);
pr_info("SBI PMU basic test : PASS\n");
}
- if (!(disabled_tests & SBI_PMU_TEST_EVENTS)) {
+ if (!(targs.disabled_tests & SBI_PMU_TEST_EVENTS)) {
test_vm_events_test(test_pmu_events);
pr_info("SBI PMU event verification test : PASS\n");
}
- if (!(disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
+ if (!(targs.disabled_tests & SBI_PMU_TEST_SNAPSHOT)) {
test_vm_events_snapshot_test(test_pmu_events_snaphost);
pr_info("SBI PMU event verification with snapshot test : PASS\n");
}
- if (!(disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
+ if (!(targs.disabled_tests & SBI_PMU_TEST_OVERFLOW)) {
test_vm_events_overflow(test_pmu_events_overflow);
pr_info("SBI PMU event verification with overflow test : PASS\n");
}
diff --git a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
index 2929c067c207..b0d2b04a7ff2 100644
--- a/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
+++ b/tools/testing/selftests/kvm/x86/dirty_log_page_splitting_test.c
@@ -41,9 +41,9 @@ struct kvm_page_stats {
static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
{
- stats->pages_4k = vm_get_stat(vm, "pages_4k");
- stats->pages_2m = vm_get_stat(vm, "pages_2m");
- stats->pages_1g = vm_get_stat(vm, "pages_1g");
+ stats->pages_4k = vm_get_stat(vm, pages_4k);
+ stats->pages_2m = vm_get_stat(vm, pages_2m);
+ stats->pages_1g = vm_get_stat(vm, pages_1g);
stats->hugepages = stats->pages_2m + stats->pages_1g;
pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",
diff --git a/tools/testing/selftests/kvm/x86/hyperv_ipi.c b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
index 22c0c124582f..2b5b4bc6ef7e 100644
--- a/tools/testing/selftests/kvm/x86/hyperv_ipi.c
+++ b/tools/testing/selftests/kvm/x86/hyperv_ipi.c
@@ -63,8 +63,10 @@ static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
/* Signal sender vCPU we're ready */
ipis_rcvd[vcpu_id] = (u64)-1;
- for (;;)
- asm volatile("sti; hlt; cli");
+ for (;;) {
+ safe_halt();
+ cli();
+ }
}
static void guest_ipi_handler(struct ex_regs *regs)
diff --git a/tools/testing/selftests/kvm/x86/nested_emulation_test.c b/tools/testing/selftests/kvm/x86/nested_emulation_test.c
new file mode 100644
index 000000000000..abc824dba04f
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/nested_emulation_test.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+#include "svm_util.h"
+
+enum {
+ SVM_F,
+ VMX_F,
+ NR_VIRTUALIZATION_FLAVORS,
+};
+
+struct emulated_instruction {
+ const char name[32];
+ uint8_t opcode[15];
+ uint32_t exit_reason[NR_VIRTUALIZATION_FLAVORS];
+};
+
+static struct emulated_instruction instructions[] = {
+ {
+ .name = "pause",
+ .opcode = { 0xf3, 0x90 },
+ .exit_reason = { SVM_EXIT_PAUSE,
+ EXIT_REASON_PAUSE_INSTRUCTION, }
+ },
+ {
+ .name = "hlt",
+ .opcode = { 0xf4 },
+ .exit_reason = { SVM_EXIT_HLT,
+ EXIT_REASON_HLT, }
+ },
+};
+
+static uint8_t kvm_fep[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d }; /* ud2 ; .ascii "kvm" */
+static uint8_t l2_guest_code[sizeof(kvm_fep) + 15];
+static uint8_t *l2_instruction = &l2_guest_code[sizeof(kvm_fep)];
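For readers unfamiliar with forced emulation: when kvm.force_emulation_prefix
is enabled, KVM's emulator consumes the five-byte "ud2; 'kvm'" magic and
emulates the instruction that follows it. A hypothetical helper illustrating
the layout (a sketch, not part of this patch):

	/* Hypothetical: prefix an opcode with the forced-emulation magic. */
	static size_t build_fep_insn(uint8_t *dst, const uint8_t *opcode,
				     size_t len)
	{
		memcpy(dst, kvm_fep, sizeof(kvm_fep));
		memcpy(dst + sizeof(kvm_fep), opcode, len);
		return sizeof(kvm_fep) + len;
	}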
+
+static uint32_t get_instruction_length(struct emulated_instruction *insn)
+{
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(insn->opcode) && insn->opcode[i]; i++)
+ ;
+
+ return i;
+}
+
+static void guest_code(void *test_data)
+{
+ int f = this_cpu_has(X86_FEATURE_SVM) ? SVM_F : VMX_F;
+ int i;
+
+ memcpy(l2_guest_code, kvm_fep, sizeof(kvm_fep));
+
+ if (f == SVM_F) {
+ struct svm_test_data *svm = test_data;
+ struct vmcb *vmcb = svm->vmcb;
+
+ generic_svm_setup(svm, NULL, NULL);
+ vmcb->save.idtr.limit = 0;
+ vmcb->save.rip = (u64)l2_guest_code;
+
+ vmcb->control.intercept |= BIT_ULL(INTERCEPT_SHUTDOWN) |
+ BIT_ULL(INTERCEPT_PAUSE) |
+ BIT_ULL(INTERCEPT_HLT);
+ vmcb->control.intercept_exceptions = 0;
+ } else {
+ GUEST_ASSERT(prepare_for_vmx_operation(test_data));
+ GUEST_ASSERT(load_vmcs(test_data));
+
+ prepare_vmcs(test_data, NULL, NULL);
+ GUEST_ASSERT(!vmwrite(GUEST_IDTR_LIMIT, 0));
+ GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
+ GUEST_ASSERT(!vmwrite(EXCEPTION_BITMAP, 0));
+
+ vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmreadz(CPU_BASED_VM_EXEC_CONTROL) |
+ CPU_BASED_PAUSE_EXITING |
+ CPU_BASED_HLT_EXITING);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(instructions); i++) {
+ struct emulated_instruction *insn = &instructions[i];
+ uint32_t insn_len = get_instruction_length(insn);
+ uint32_t exit_insn_len;
+ u32 exit_reason;
+
+ /*
+ * Copy the target instruction to the L2 code stream, and fill
+ * the remaining bytes with INT3s so that a missed intercept
+ * results in a consistent failure mode (SHUTDOWN).
+ */
+ memcpy(l2_instruction, insn->opcode, insn_len);
+ memset(l2_instruction + insn_len, 0xcc, sizeof(insn->opcode) - insn_len);
+
+ if (f == SVM_F) {
+ struct svm_test_data *svm = test_data;
+ struct vmcb *vmcb = svm->vmcb;
+
+ run_guest(vmcb, svm->vmcb_gpa);
+ exit_reason = vmcb->control.exit_code;
+ exit_insn_len = vmcb->control.next_rip - vmcb->save.rip;
+ GUEST_ASSERT_EQ(vmcb->save.rip, (u64)l2_instruction);
+ } else {
+ GUEST_ASSERT_EQ(i ? vmresume() : vmlaunch(), 0);
+ exit_reason = vmreadz(VM_EXIT_REASON);
+ exit_insn_len = vmreadz(VM_EXIT_INSTRUCTION_LEN);
+ GUEST_ASSERT_EQ(vmreadz(GUEST_RIP), (u64)l2_instruction);
+ }
+
+ __GUEST_ASSERT(exit_reason == insn->exit_reason[f],
+ "Wanted exit_reason '0x%x' for '%s', got '0x%x'",
+ insn->exit_reason[f], insn->name, exit_reason);
+
+ __GUEST_ASSERT(exit_insn_len == insn_len,
+ "Wanted insn_len '%u' for '%s', got '%u'",
+ insn_len, insn->name, exit_insn_len);
+ }
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t nested_test_data_gva;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(is_forced_emulation_enabled);
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);
+
+ if (kvm_cpu_has(X86_FEATURE_SVM))
+ vcpu_alloc_svm(vm, &nested_test_data_gva);
+ else
+ vcpu_alloc_vmx(vm, &nested_test_data_gva);
+
+ vcpu_args_set(vcpu, 1, nested_test_data_gva);
+
+ vcpu_run(vcpu);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+
+ kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
index e7efb2b35f8b..c0d84827f736 100644
--- a/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86/nx_huge_pages_test.c
@@ -73,7 +73,7 @@ static void check_2m_page_count(struct kvm_vm *vm, int expected_pages_2m)
{
int actual_pages_2m;
- actual_pages_2m = vm_get_stat(vm, "pages_2m");
+ actual_pages_2m = vm_get_stat(vm, pages_2m);
TEST_ASSERT(actual_pages_2m == expected_pages_2m,
"Unexpected 2m page count. Expected %d, got %d",
@@ -84,7 +84,7 @@ static void check_split_count(struct kvm_vm *vm, int expected_splits)
{
int actual_splits;
- actual_splits = vm_get_stat(vm, "nx_lpage_splits");
+ actual_splits = vm_get_stat(vm, nx_lpage_splits);
TEST_ASSERT(actual_splits == expected_splits,
"Unexpected NX huge page split count. Expected %d, got %d",
diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index 698cb36989db..8aaaf25b6111 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -17,7 +17,7 @@
* Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
* 1 MOV, 1 LOOP.
*/
-#define NUM_INSNS_PER_LOOP 3
+#define NUM_INSNS_PER_LOOP 4
/*
* Number of "extra" instructions that will be counted, i.e. the number of
@@ -29,10 +29,59 @@
/* Total number of instructions retired within the measured section. */
#define NUM_INSNS_RETIRED (NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
+/* Track which architectural events are supported by hardware. */
+static uint32_t hardware_pmu_arch_events;
static uint8_t kvm_pmu_version;
static bool kvm_has_perf_caps;
+#define X86_PMU_FEATURE_NULL \
+({ \
+ struct kvm_x86_pmu_feature feature = {}; \
+ \
+ feature; \
+})
+
+static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
+{
+ return !(*(u64 *)&event);
+}
+
+struct kvm_intel_pmu_event {
+ struct kvm_x86_pmu_feature gp_event;
+ struct kvm_x86_pmu_feature fixed_event;
+};
+
+/*
+ * Wrap the array to appease the compiler, as the macros used to construct each
+ * kvm_x86_pmu_feature use syntax that's only valid in function scope, and the
+ * compiler often thinks the feature definitions aren't compile-time constants.
+ */
+static struct kvm_intel_pmu_event intel_event_to_feature(uint8_t idx)
+{
+ const struct kvm_intel_pmu_event __intel_event_to_feature[] = {
+ [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
+ [INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
+ /*
+ * Note, the fixed counter for reference cycles is NOT the same as the
+ * general purpose architectural event. The fixed counter explicitly
+ * counts at the same frequency as the TSC, whereas the GP event counts
+ * at a fixed, but uarch specific, frequency. Bundle them here for
+ * simplicity.
+ */
+ [INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
+ [INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
+ [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
+ };
+
+ kvm_static_assert(ARRAY_SIZE(__intel_event_to_feature) == NR_INTEL_ARCH_EVENTS);
+
+ return __intel_event_to_feature[idx];
+}
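Call sites then read naturally, e.g. (as in the hunks further down):

	gp_event = intel_event_to_feature(idx).gp_event;
	fixed_event = intel_event_to_feature(idx).fixed_event;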
+
static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
void *guest_code,
uint8_t pmu_version,
@@ -42,6 +91,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
vm = vm_create_with_one_vcpu(vcpu, guest_code);
sync_global_to_guest(vm, kvm_pmu_version);
+ sync_global_to_guest(vm, hardware_pmu_arch_events);
/*
* Set PERF_CAPABILITIES before PMU version as KVM disallows enabling
@@ -98,14 +148,12 @@ static uint8_t guest_get_pmu_version(void)
* Sanity check that in all cases, the event doesn't count when it's disabled,
* and that KVM correctly emulates the write of an arbitrary value.
*/
-static void guest_assert_event_count(uint8_t idx,
- struct kvm_x86_pmu_feature event,
- uint32_t pmc, uint32_t pmc_msr)
+static void guest_assert_event_count(uint8_t idx, uint32_t pmc, uint32_t pmc_msr)
{
uint64_t count;
count = _rdpmc(pmc);
- if (!this_pmu_has(event))
+ if (!(hardware_pmu_arch_events & BIT(idx)))
goto sanity_checks;
switch (idx) {
@@ -126,7 +174,9 @@ static void guest_assert_event_count(uint8_t idx,
GUEST_ASSERT_NE(count, 0);
break;
case INTEL_ARCH_TOPDOWN_SLOTS_INDEX:
- GUEST_ASSERT(count >= NUM_INSNS_RETIRED);
+ __GUEST_ASSERT(count >= NUM_INSNS_RETIRED,
+ "Expected top-down slots >= %u, got count = %lu",
+ NUM_INSNS_RETIRED, count);
break;
default:
break;
@@ -162,75 +212,42 @@ do { \
"1:\n\t" \
clflush "\n\t" \
"mfence\n\t" \
+ "mov %[m], %%eax\n\t" \
FEP "loop 1b\n\t" \
FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \
FEP "xor %%edx, %%edx\n\t" \
"wrmsr\n\t" \
:: "a"((uint32_t)_value), "d"(_value >> 32), \
- "c"(_msr), "D"(_msr) \
+ "c"(_msr), "D"(_msr), [m]"m"(kvm_pmu_version) \
); \
} while (0)
-#define GUEST_TEST_EVENT(_idx, _event, _pmc, _pmc_msr, _ctrl_msr, _value, FEP) \
+#define GUEST_TEST_EVENT(_idx, _pmc, _pmc_msr, _ctrl_msr, _value, FEP) \
do { \
- wrmsr(pmc_msr, 0); \
+ wrmsr(_pmc_msr, 0); \
\
if (this_cpu_has(X86_FEATURE_CLFLUSHOPT)) \
- GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt .", FEP); \
+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflushopt %[m]", FEP); \
else if (this_cpu_has(X86_FEATURE_CLFLUSH)) \
- GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush .", FEP); \
+ GUEST_MEASURE_EVENT(_ctrl_msr, _value, "clflush %[m]", FEP); \
else \
GUEST_MEASURE_EVENT(_ctrl_msr, _value, "nop", FEP); \
\
- guest_assert_event_count(_idx, _event, _pmc, _pmc_msr); \
+ guest_assert_event_count(_idx, _pmc, _pmc_msr); \
} while (0)
-static void __guest_test_arch_event(uint8_t idx, struct kvm_x86_pmu_feature event,
- uint32_t pmc, uint32_t pmc_msr,
+static void __guest_test_arch_event(uint8_t idx, uint32_t pmc, uint32_t pmc_msr,
uint32_t ctrl_msr, uint64_t ctrl_msr_value)
{
- GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
+ GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, "");
if (is_forced_emulation_enabled)
- GUEST_TEST_EVENT(idx, event, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
-}
-
-#define X86_PMU_FEATURE_NULL \
-({ \
- struct kvm_x86_pmu_feature feature = {}; \
- \
- feature; \
-})
-
-static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
-{
- return !(*(u64 *)&event);
+ GUEST_TEST_EVENT(idx, pmc, pmc_msr, ctrl_msr, ctrl_msr_value, KVM_FEP);
}
static void guest_test_arch_event(uint8_t idx)
{
- const struct {
- struct kvm_x86_pmu_feature gp_event;
- struct kvm_x86_pmu_feature fixed_event;
- } intel_event_to_feature[] = {
- [INTEL_ARCH_CPU_CYCLES_INDEX] = { X86_PMU_FEATURE_CPU_CYCLES, X86_PMU_FEATURE_CPU_CYCLES_FIXED },
- [INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX] = { X86_PMU_FEATURE_INSNS_RETIRED, X86_PMU_FEATURE_INSNS_RETIRED_FIXED },
- /*
- * Note, the fixed counter for reference cycles is NOT the same
- * as the general purpose architectural event. The fixed counter
- * explicitly counts at the same frequency as the TSC, whereas
- * the GP event counts at a fixed, but uarch specific, frequency.
- * Bundle them here for simplicity.
- */
- [INTEL_ARCH_REFERENCE_CYCLES_INDEX] = { X86_PMU_FEATURE_REFERENCE_CYCLES, X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED },
- [INTEL_ARCH_LLC_REFERENCES_INDEX] = { X86_PMU_FEATURE_LLC_REFERENCES, X86_PMU_FEATURE_NULL },
- [INTEL_ARCH_LLC_MISSES_INDEX] = { X86_PMU_FEATURE_LLC_MISSES, X86_PMU_FEATURE_NULL },
- [INTEL_ARCH_BRANCHES_RETIRED_INDEX] = { X86_PMU_FEATURE_BRANCH_INSNS_RETIRED, X86_PMU_FEATURE_NULL },
- [INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX] = { X86_PMU_FEATURE_BRANCHES_MISPREDICTED, X86_PMU_FEATURE_NULL },
- [INTEL_ARCH_TOPDOWN_SLOTS_INDEX] = { X86_PMU_FEATURE_TOPDOWN_SLOTS, X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED },
- };
-
uint32_t nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
uint32_t pmu_version = guest_get_pmu_version();
/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
@@ -248,7 +265,7 @@ static void guest_test_arch_event(uint8_t idx)
else
base_pmc_msr = MSR_IA32_PERFCTR0;
- gp_event = intel_event_to_feature[idx].gp_event;
+ gp_event = intel_event_to_feature(idx).gp_event;
GUEST_ASSERT_EQ(idx, gp_event.f.bit);
GUEST_ASSERT(nr_gp_counters);
@@ -262,14 +279,14 @@ static void guest_test_arch_event(uint8_t idx)
if (guest_has_perf_global_ctrl)
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(i));
- __guest_test_arch_event(idx, gp_event, i, base_pmc_msr + i,
+ __guest_test_arch_event(idx, i, base_pmc_msr + i,
MSR_P6_EVNTSEL0 + i, eventsel);
}
if (!guest_has_perf_global_ctrl)
return;
- fixed_event = intel_event_to_feature[idx].fixed_event;
+ fixed_event = intel_event_to_feature(idx).fixed_event;
if (pmu_is_null_feature(fixed_event) || !this_pmu_has(fixed_event))
return;
@@ -277,7 +294,7 @@ static void guest_test_arch_event(uint8_t idx)
wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
- __guest_test_arch_event(idx, fixed_event, i | INTEL_RDPMC_FIXED,
+ __guest_test_arch_event(idx, i | INTEL_RDPMC_FIXED,
MSR_CORE_PERF_FIXED_CTR0 + i,
MSR_CORE_PERF_GLOBAL_CTRL,
FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
@@ -331,9 +348,9 @@ __GUEST_ASSERT(expect_gp ? vector == GP_VECTOR : !vector, \
expect_gp ? "#GP" : "no fault", msr, vector) \
#define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \
- __GUEST_ASSERT(val == expected_val, \
+ __GUEST_ASSERT(val == expected, \
"Expected " #insn "(0x%x) to yield 0x%lx, got 0x%lx", \
- msr, expected_val, val);
+ msr, expected, val);
static void guest_test_rdpmc(uint32_t rdpmc_idx, bool expect_success,
uint64_t expected_val)
@@ -545,7 +562,6 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
static void test_intel_counters(void)
{
- uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
uint8_t nr_gp_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
uint8_t pmu_version = kvm_cpu_property(X86_PROPERTY_PMU_VERSION);
@@ -567,18 +583,26 @@ static void test_intel_counters(void)
/*
* Detect the existence of events that aren't supported by selftests.
- * This will (obviously) fail any time the kernel adds support for a
- * new event, but it's worth paying that price to keep the test fresh.
+ * This will (obviously) fail any time hardware adds support for a new
+ * event, but it's worth paying that price to keep the test fresh.
*/
- TEST_ASSERT(nr_arch_events <= NR_INTEL_ARCH_EVENTS,
+ TEST_ASSERT(this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH) <= NR_INTEL_ARCH_EVENTS,
"New architectural event(s) detected; please update this test (length = %u, mask = %x)",
- nr_arch_events, kvm_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
+ this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH),
+ this_cpu_property(X86_PROPERTY_PMU_EVENTS_MASK));
/*
- * Force iterating over known arch events regardless of whether or not
- * KVM/hardware supports a given event.
+ * Iterate over known arch events irrespective of KVM/hardware support
+ * to verify that KVM doesn't reject programming of events just because
+ * the *architectural* encoding is unsupported. Track which events are
+ * supported in hardware; the guest side will verify that supported events
+ * count correctly, even if *enumeration* of the event is unsupported
+ * by KVM and/or isn't exposed to the guest.
*/
- nr_arch_events = max_t(typeof(nr_arch_events), nr_arch_events, NR_INTEL_ARCH_EVENTS);
+ for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
+ if (this_pmu_has(intel_event_to_feature(i).gp_event))
+ hardware_pmu_arch_events |= BIT(i);
+ }
for (v = 0; v <= max_pmu_version; v++) {
for (i = 0; i < ARRAY_SIZE(perf_caps); i++) {
@@ -594,8 +618,8 @@ static void test_intel_counters(void)
* vector length.
*/
if (v == pmu_version) {
- for (k = 1; k < (BIT(nr_arch_events) - 1); k++)
- test_arch_events(v, perf_caps[i], nr_arch_events, k);
+ for (k = 1; k < (BIT(NR_INTEL_ARCH_EVENTS) - 1); k++)
+ test_arch_events(v, perf_caps[i], NR_INTEL_ARCH_EVENTS, k);
}
/*
* Test single bits for all PMU version and lengths up
@@ -604,11 +628,11 @@ static void test_intel_counters(void)
* host length). Explicitly test a mask of '0' and all
* ones i.e. all events being available and unavailable.
*/
- for (j = 0; j <= nr_arch_events + 1; j++) {
+ for (j = 0; j <= NR_INTEL_ARCH_EVENTS + 1; j++) {
test_arch_events(v, perf_caps[i], j, 0);
test_arch_events(v, perf_caps[i], j, 0xff);
- for (k = 0; k < nr_arch_events; k++)
+ for (k = 0; k < NR_INTEL_ARCH_EVENTS; k++)
test_arch_events(v, perf_caps[i], j, BIT(k));
}
diff --git a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
index 916e04248fbb..917b6066cfc1 100644
--- a/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
+++ b/tools/testing/selftests/kvm/x86/svm_int_ctl_test.c
@@ -42,10 +42,7 @@ static void l2_guest_code(struct svm_test_data *svm)
x2apic_write_reg(APIC_ICR,
APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER);
- __asm__ __volatile__(
- "sti\n"
- "nop\n"
- );
+ sti_nop();
GUEST_ASSERT(vintr_irq_called);
GUEST_ASSERT(intr_irq_called);
diff --git a/tools/testing/selftests/kvm/x86/ucna_injection_test.c b/tools/testing/selftests/kvm/x86/ucna_injection_test.c
index 57f157c06b39..1e5e564523b3 100644
--- a/tools/testing/selftests/kvm/x86/ucna_injection_test.c
+++ b/tools/testing/selftests/kvm/x86/ucna_injection_test.c
@@ -86,7 +86,7 @@ static void ucna_injection_guest_code(void)
wrmsr(MSR_IA32_MCx_CTL2(UCNA_BANK), ctl2 | MCI_CTL2_CMCI_EN);
/* Enables interrupt in guest. */
- asm volatile("sti");
+ sti();
/* Let user space inject the first UCNA */
GUEST_SYNC(SYNC_FIRST_UCNA);
diff --git a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
index a76078a08ff8..35cb9de54a82 100644
--- a/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_ipi_test.c
@@ -106,7 +106,8 @@ static void halter_guest_code(struct test_data_page *data)
data->halter_tpr = xapic_read_reg(APIC_TASKPRI);
data->halter_ppr = xapic_read_reg(APIC_PROCPRI);
data->hlt_count++;
- asm volatile("sti; hlt; cli");
+ safe_halt();
+ cli();
data->wake_count++;
}
}
@@ -465,6 +466,19 @@ int main(int argc, char *argv[])
cancel_join_vcpu_thread(threads[0], params[0].vcpu);
cancel_join_vcpu_thread(threads[1], params[1].vcpu);
+ /*
+ * If the host supports Idle HLT, i.e. KVM *might* be using Idle HLT,
+ * then the number of HLT exits may be less than the number of HLTs
+ * that were executed, as Idle HLT elides the exit if the vCPU has an
+ * unmasked, pending IRQ (or NMI).
+ */
+ if (this_cpu_has(X86_FEATURE_IDLE_HLT))
+ TEST_ASSERT(data->hlt_count >= vcpu_get_stat(params[0].vcpu, halt_exits),
+ "HLT insns = %lu, HLT exits = %lu",
+ data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
+ else
+ TEST_ASSERT_EQ(data->hlt_count, vcpu_get_stat(params[0].vcpu, halt_exits));
+
fprintf(stderr,
"Test successful after running for %d seconds.\n"
"Sending vCPU sent %lu IPIs to halting vCPU\n"
diff --git a/tools/testing/selftests/kvm/x86/xapic_state_test.c b/tools/testing/selftests/kvm/x86/xapic_state_test.c
index 88bcca188799..fdebff1165c7 100644
--- a/tools/testing/selftests/kvm/x86/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86/xapic_state_test.c
@@ -18,7 +18,7 @@ struct xapic_vcpu {
static void xapic_guest_code(void)
{
- asm volatile("cli");
+ cli();
xapic_enable();
@@ -38,7 +38,7 @@ static void xapic_guest_code(void)
static void x2apic_guest_code(void)
{
- asm volatile("cli");
+ cli();
x2apic_enable();
diff --git a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
index a59b3c799bb2..287829f850f7 100644
--- a/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86/xen_shinfo_test.c
@@ -191,10 +191,7 @@ static void guest_code(void)
struct vcpu_runstate_info *rs = (void *)RUNSTATE_VADDR;
int i;
- __asm__ __volatile__(
- "sti\n"
- "nop\n"
- );
+ sti_nop();
/* Trigger an interrupt injection */
GUEST_SYNC(TEST_INJECT_VECTOR);
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index d6edcfcb5be8..530390033929 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -228,4 +228,7 @@ $(OUTPUT)/%:%.S
$(LINK.S) $^ $(LDLIBS) -o $@
endif
-.PHONY: run_tests all clean install emit_tests gen_mods_dir clean_mods_dir
+headers:
+ $(Q)$(MAKE) -C $(top_srcdir) headers
+
+.PHONY: run_tests all clean install emit_tests gen_mods_dir clean_mods_dir headers
diff --git a/tools/testing/selftests/lib/Makefile b/tools/testing/selftests/lib/Makefile
index c52fe3ad8e98..f876bf4744e1 100644
--- a/tools/testing/selftests/lib/Makefile
+++ b/tools/testing/selftests/lib/Makefile
@@ -4,5 +4,5 @@
# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
all:
-TEST_PROGS := printf.sh bitmap.sh prime_numbers.sh scanf.sh
+TEST_PROGS := bitmap.sh
include ../lib.mk
diff --git a/tools/testing/selftests/lib/config b/tools/testing/selftests/lib/config
index dc15aba8d0a3..81a1f64a22e8 100644
--- a/tools/testing/selftests/lib/config
+++ b/tools/testing/selftests/lib/config
@@ -1,5 +1,2 @@
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
-CONFIG_PRIME_NUMBERS=m
CONFIG_TEST_BITOPS=m
diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh
deleted file mode 100755
index 370b79a9cb2e..000000000000
--- a/tools/testing/selftests/lib/prime_numbers.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Checks fast/slow prime_number generation for inconsistencies
-$(dirname $0)/../kselftest/module.sh "prime numbers" prime_numbers selftest=65536
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
deleted file mode 100755
index 05f4544e87f9..000000000000
--- a/tools/testing/selftests/lib/printf.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Tests the printf infrastructure using test_printf kernel module.
-$(dirname $0)/../kselftest/module.sh "printf" test_printf
diff --git a/tools/testing/selftests/lib/scanf.sh b/tools/testing/selftests/lib/scanf.sh
deleted file mode 100755
index b59b8ba561c3..000000000000
--- a/tools/testing/selftests/lib/scanf.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Tests the scanf infrastructure using test_scanf kernel module.
-$(dirname $0)/../kselftest/module.sh "scanf" test_scanf
diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-pages.c
index ece37212a8a2..525c50d3ec23 100644
--- a/tools/testing/selftests/mm/guard-pages.c
+++ b/tools/testing/selftests/mm/guard-pages.c
@@ -19,6 +19,8 @@
#include <sys/uio.h>
#include <unistd.h>
+#include "../pidfd/pidfd.h"
+
/*
* Ignore the checkpatch warning, as per the C99 standard, section 7.14.1.1:
*
@@ -50,11 +52,6 @@ static void handle_fatal(int c)
siglongjmp(signal_jmp_buf, c);
}
-static int pidfd_open(pid_t pid, unsigned int flags)
-{
- return syscall(SYS_pidfd_open, pid, flags);
-}
-
static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec,
size_t n, int advice, unsigned int flags)
{
@@ -370,14 +367,10 @@ TEST_F(guard_pages, multi_vma)
TEST_F(guard_pages, process_madvise)
{
const unsigned long page_size = self->page_size;
- pid_t pid = getpid();
- int pidfd = pidfd_open(pid, 0);
char *ptr_region, *ptr1, *ptr2, *ptr3;
ssize_t count;
struct iovec vec[6];
- ASSERT_NE(pidfd, -1);
-
/* Reserve region to map over. */
ptr_region = mmap(NULL, 100 * page_size, PROT_NONE,
MAP_ANON | MAP_PRIVATE, -1, 0);
@@ -425,7 +418,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0);
/* Now guard in one step. */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_INSTALL, 0);
/* OK we don't have permission to do this, skip. */
if (count == -1 && errno == EPERM)
@@ -446,7 +439,7 @@ TEST_F(guard_pages, process_madvise)
ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size]));
/* Now do the same with unguard... */
- count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0);
+ count = sys_process_madvise(PIDFD_SELF, vec, 6, MADV_GUARD_REMOVE, 0);
/* ...and everything should now succeed. */
@@ -463,7 +456,6 @@ TEST_F(guard_pages, process_madvise)
ASSERT_EQ(munmap(ptr1, 10 * page_size), 0);
ASSERT_EQ(munmap(ptr2, 5 * page_size), 0);
ASSERT_EQ(munmap(ptr3, 20 * page_size), 0);
- close(pidfd);
}
/* Assert that unmapping ranges does not leave guard markers behind. */
diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c
index ad17005521a8..005f29c86484 100644
--- a/tools/testing/selftests/mm/mseal_test.c
+++ b/tools/testing/selftests/mm/mseal_test.c
@@ -218,7 +218,7 @@ bool seal_support(void)
bool pkey_supported(void)
{
#if defined(__i386__) || defined(__x86_64__) /* arch */
- int pkey = sys_pkey_alloc(0, 0);
+ int pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
if (pkey > 0)
return true;
@@ -1671,7 +1671,7 @@ static void test_seal_discard_ro_anon_on_pkey(bool seal)
setup_single_address_rw(size, &ptr);
FAIL_TEST_IF_FALSE(ptr != (void *)-1);
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
FAIL_TEST_IF_FALSE(pkey > 0);
ret = sys_mprotect_pkey((void *)ptr, size, PROT_READ | PROT_WRITE, pkey);
@@ -1683,7 +1683,7 @@ static void test_seal_discard_ro_anon_on_pkey(bool seal)
}
/* sealing doesn't take effect if PKRU allow write. */
- set_pkey(pkey, 0);
+ set_pkey(pkey, PKEY_UNRESTRICTED);
ret = sys_madvise(ptr, size, MADV_DONTNEED);
FAIL_TEST_IF_FALSE(!ret);
diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h
index f080e97b39be..ea404f80e6cb 100644
--- a/tools/testing/selftests/mm/pkey-helpers.h
+++ b/tools/testing/selftests/mm/pkey-helpers.h
@@ -13,6 +13,7 @@
#include <ucontext.h>
#include <sys/mman.h>
+#include <linux/mman.h>
#include <linux/types.h>
#include "../kselftest.h"
@@ -193,7 +194,7 @@ static inline u32 *siginfo_get_pkey_ptr(siginfo_t *si)
static inline int kernel_has_pkeys(void)
{
/* try allocating a key and see if it succeeds */
- int ret = sys_pkey_alloc(0, 0);
+ int ret = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
if (ret <= 0) {
return 0;
}
diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c
index 1ac8c8809880..b5e076a564c9 100644
--- a/tools/testing/selftests/mm/pkey_sighandler_tests.c
+++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c
@@ -311,7 +311,7 @@ static void test_sigsegv_handler_with_different_pkey_for_stack(void)
__write_pkey_reg(pkey_reg);
/* Protect the new stack with MPK 1 */
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey);
/* Set up alternate signal stack that will use the default MPK */
@@ -484,7 +484,7 @@ static void test_pkru_sigreturn(void)
__write_pkey_reg(pkey_reg);
/* Protect the stack with MPK 2 */
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey);
/* Set up alternate signal stack that will use the default MPK */
diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c
index 35565af308af..23ebec367015 100644
--- a/tools/testing/selftests/mm/protection_keys.c
+++ b/tools/testing/selftests/mm/protection_keys.c
@@ -463,7 +463,7 @@ static pid_t fork_lazy_child(void)
static int alloc_pkey(void)
{
int ret;
- unsigned long init_val = 0x0;
+ unsigned long init_val = PKEY_UNRESTRICTED;
dprintf1("%s()::%d, pkey_reg: 0x%016llx shadow: %016llx\n",
__func__, __LINE__, __read_pkey_reg(), shadow_pkey_reg);
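For reference, PKEY_UNRESTRICTED comes from the uapi <linux/mman.h> header
that pkey-helpers.h now includes; it is the named form of the old literal
zero (a pkey with no access restrictions), i.e. roughly:

	#include <linux/mman.h>

	/* PKEY_UNRESTRICTED == 0x0: the pkey imposes no restrictions. */
	int pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);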
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
index 70f65eb320a7..48a000cabc97 100644
--- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c
+++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c
@@ -20,6 +20,7 @@
#include <stdarg.h>
#include <linux/mount.h>
+#include "../filesystems/overlayfs/wrappers.h"
#include "../kselftest_harness.h"
#ifndef CLONE_NEWNS
@@ -126,6 +127,26 @@
#endif
#endif
+#ifndef __NR_move_mount
+ #if defined __alpha__
+ #define __NR_move_mount 539
+ #elif defined _MIPS_SIM
+ #if _MIPS_SIM == _MIPS_SIM_ABI32 /* o32 */
+ #define __NR_move_mount 4429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_NABI32 /* n32 */
+ #define __NR_move_mount 6429
+ #endif
+ #if _MIPS_SIM == _MIPS_SIM_ABI64 /* n64 */
+ #define __NR_move_mount 5429
+ #endif
+ #elif defined __ia64__
+ #define __NR_move_mount (428 + 1024)
+ #else
+ #define __NR_move_mount 429
+ #endif
+#endif
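The per-arch fallback numbers let the test issue the syscall directly even
with older libc headers; a sketch of the kind of wrapper this feeds (the
file's actual helper may differ):

	static inline int sys_move_mount(int from_dfd, const char *from_path,
					 int to_dfd, const char *to_path,
					 unsigned int flags)
	{
		return syscall(__NR_move_mount, from_dfd, from_path,
			       to_dfd, to_path, flags);
	}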
+
#ifndef MOUNT_ATTR_IDMAP
#define MOUNT_ATTR_IDMAP 0x00100000
#endif
@@ -397,6 +418,10 @@ FIXTURE_SETUP(mount_setattr)
ASSERT_EQ(mkdir("/tmp/B/BB", 0777), 0);
+ ASSERT_EQ(mkdir("/tmp/target1", 0777), 0);
+
+ ASSERT_EQ(mkdir("/tmp/target2", 0777), 0);
+
ASSERT_EQ(mount("testing", "/tmp/B/BB", "tmpfs", MS_NOATIME | MS_NODEV,
"size=100000,mode=700"), 0);
@@ -1506,4 +1531,631 @@ TEST_F(mount_setattr, mount_attr_nosymfollow)
ASSERT_EQ(close(fd), 0);
}
+TEST_F(mount_setattr, open_tree_detached)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /AA testing tmpfs
+ * `-/AA/B testing tmpfs
+ * `-/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_subdir, "B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ /*
+ * /tmp/target1 testing tmpfs
+ * `-/tmp/target1/B testing tmpfs
+ * `-/tmp/target1/B/BB testing tmpfs
+ */
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target2", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ /*
+ * /tmp/target2 testing tmpfs
+ * |-/tmp/target2/A testing tmpfs
+ * | `-/tmp/target2/A/AA testing tmpfs
+ * | `-/tmp/target2/A/AA/B testing tmpfs
+ * | `-/tmp/target2/A/AA/B/BB testing tmpfs
+ * `-/tmp/target2/B testing ramfs
+ */
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target2/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
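The repeated statx()-based assertions in these tests could be factored into a
small predicate; a hypothetical sketch (not part of the patch):

	/* Hypothetical: check whether a path is the root of a mount. */
	static bool is_mount_root(int dfd, const char *path)
	{
		struct statx stx;

		if (statx(dfd, path, 0, 0, &stx) < 0)
			return false;
		return stx.stx_attributes & STATX_ATTR_MOUNT_ROOT;
	}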
+
+TEST_F(mount_setattr, open_tree_detached_fail)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+
+ /*
+ * The origin mount namespace of the anonymous mount namespace
+ * of @fd_tree_base doesn't match the caller's mount namespace
+ * anymore, so creating another detached mount must fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_F(mount_setattr, open_tree_detached_fail2)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(create_and_enter_userns(), 0);
+
+ /*
+ * The caller entered a new user namespace. They will have
+ * CAP_SYS_ADMIN in this user namespace. However, they're still
+ * located in a mount namespace that is owned by an ancestor
+ * user namespace in which they hold no privilege. Creating a
+ * detached mount must thus fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EPERM);
+}
+
+TEST_F(mount_setattr, open_tree_detached_fail3)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_EQ(statx(fd_tree_base, "A/AA/B/BB", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ EXPECT_EQ(prepare_unpriv_mountns(), 0);
+
+ /*
+ * The caller entered a new mount namespace. They will have
+ * CAP_SYS_ADMIN in the owning user namespace of their mount
+ * namespace.
+ *
+ * However, the origin mount namespace of the anonymous mount
+ * namespace of @fd_tree_base doesn't match the caller's mount
+ * namespace anymore, so creating another detached mount must
+ * fail.
+ */
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "A/AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_LT(fd_tree_subdir, 0);
+ ASSERT_EQ(errno, EINVAL);
+}
+
+TEST_F(mount_setattr, open_tree_subfolder)
+{
+ int fd_context, fd_tmpfs, fd_tree;
+
+ fd_context = sys_fsopen("tmpfs", 0);
+ ASSERT_GE(fd_context, 0);
+
+ ASSERT_EQ(sys_fsconfig(fd_context, FSCONFIG_CMD_CREATE, NULL, NULL, 0), 0);
+
+ fd_tmpfs = sys_fsmount(fd_context, 0, 0);
+ ASSERT_GE(fd_tmpfs, 0);
+
+ EXPECT_EQ(close(fd_context), 0);
+
+ ASSERT_EQ(mkdirat(fd_tmpfs, "subdir", 0755), 0);
+
+ fd_tree = sys_open_tree(fd_tmpfs, "subdir",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ EXPECT_EQ(close(fd_tmpfs), 0);
+
+ ASSERT_EQ(mkdirat(-EBADF, "/mnt/open_tree_subfolder", 0755), 0);
+
+ ASSERT_EQ(sys_move_mount(fd_tree, "", -EBADF, "/mnt/open_tree_subfolder", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ EXPECT_EQ(close(fd_tree), 0);
+
+ ASSERT_EQ(umount2("/mnt/open_tree_subfolder", 0), 0);
+
+ EXPECT_EQ(rmdir("/mnt/open_tree_subfolder"), 0);
+}
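+
+/*
+ * For reference, the sys_fsopen()/sys_fsconfig()/sys_fsmount() helpers
+ * used above wrap the new mount API syscalls. A minimal sketch,
+ * assuming the __NR_* syscall numbers are defined:
+ *
+ *	static int sys_fsopen(const char *fsname, unsigned int flags)
+ *	{
+ *		return syscall(__NR_fsopen, fsname, flags);
+ *	}
+ *
+ *	static int sys_fsconfig(int fd, unsigned int cmd, const char *key,
+ *				const void *value, int aux)
+ *	{
+ *		return syscall(__NR_fsconfig, fd, cmd, key, value, aux);
+ *	}
+ *
+ *	static int sys_fsmount(int fd, unsigned int flags, unsigned int attr_flags)
+ *	{
+ *		return syscall(__NR_fsmount, fd, flags, attr_flags);
+ *	}
+ */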
+
+TEST_F(mount_setattr, mount_detached_mount_on_detached_mount_then_close)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /*
+ * /mnt testing tmpfs
+ * `-/mnt testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "", AT_EMPTY_PATH, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_NE(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, mount_detached_mount_on_detached_mount_and_attach)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+ __u64 mnt_id = 0;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(fd_tree_base, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ /*
+ * /mnt testing tmpfs
+ */
+ ASSERT_EQ(statx(fd_tree_subdir, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /*
+ * /mnt testing tmpfs
+ * `-/mnt testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "", AT_EMPTY_PATH, STATX_MNT_ID_UNIQUE, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_TRUE(stx.stx_mask & STATX_MNT_ID_UNIQUE);
+ mnt_id = stx.stx_mnt_id;
+
+ ASSERT_NE(move_mount(fd_tree_subdir, "", fd_tree_base, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, STATX_MNT_ID_UNIQUE, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+ ASSERT_TRUE(stx.stx_mask & STATX_MNT_ID_UNIQUE);
+ ASSERT_EQ(stx.stx_mnt_id, mnt_id);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, move_mount_detached_fail)
+{
+ int fd_tree_base = -EBADF, fd_tree_subdir = -EBADF;
+ struct statx stx;
+
+ fd_tree_base = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_base, 0);
+
+ /* Attach the mount to the caller's mount namespace. */
+ ASSERT_EQ(move_mount(fd_tree_base, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(statx(fd_tree_base, "A", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ fd_tree_subdir = sys_open_tree(-EBADF, "/tmp/B",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ OPEN_TREE_CLOEXEC | OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree_subdir, 0);
+ ASSERT_EQ(statx(fd_tree_subdir, "BB", 0, 0, &stx), 0);
+ ASSERT_FALSE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /* Not allowed to move an attached mount to a detached mount. */
+ ASSERT_NE(move_mount(fd_tree_base, "", fd_tree_subdir, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+ ASSERT_EQ(errno, EINVAL);
+
+ EXPECT_EQ(close(fd_tree_base), 0);
+ EXPECT_EQ(close(fd_tree_subdir), 0);
+}
+
+TEST_F(mount_setattr, attach_detached_mount_then_umount_then_close)
+{
+ int fd_tree = -EBADF;
+ struct statx stx;
+
+ fd_tree = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx), 0);
+ /* We copied with AT_RECURSIVE so /mnt/A must be a mountpoint. */
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ /* Attach the mount to the caller's mount namespace. */
+ ASSERT_EQ(move_mount(fd_tree, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(statx(-EBADF, "/tmp/target1", 0, 0, &stx), 0);
+ ASSERT_TRUE(stx.stx_attributes & STATX_ATTR_MOUNT_ROOT);
+
+ ASSERT_EQ(umount2("/tmp/target1", MNT_DETACH), 0);
+
+ /*
+ * This tests whether dissolve_on_fput() handles a NULL mount
+ * namespace correctly, i.e., that it doesn't splat.
+ */
+ EXPECT_EQ(close(fd_tree), 0);
+}
+
+TEST_F(mount_setattr, mount_detached1_onto_detached2_then_close_detached1_then_mount_detached2_onto_attached)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+ * `-/mnt/B testing ramfs
+ */
+ fd_tree2 = sys_open_tree(-EBADF, "/mnt/B",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+ * Move the source detached mount tree to the target detached
+ * mount tree. This will move all the mounts in the source mount
+ * tree from the source anonymous mount namespace to the target
+ * anonymous mount namespace.
+ *
+ * The source detached mount tree and the target detached mount
+ * tree now both refer to the same anonymous mount namespace.
+ *
+ * |-"" testing ramfs
+ * `-"" testing tmpfs
+ * `-""/AA testing tmpfs
+ * `-""/AA/B testing tmpfs
+ * `-""/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree1, "", fd_tree2, "", MOVE_MOUNT_F_EMPTY_PATH | MOVE_MOUNT_T_EMPTY_PATH), 0);
+
+ /*
+ * The source detached mount tree @fd_tree1 is now an attached
+ * mount, i.e., it has a parent. Specifically, it now has the
+ * root mount of the mount tree of @fd_tree2 as its parent.
+ *
+ * That means we are no longer allowed to attach it as we only
+ * allow attaching the root of an anonymous mount tree, not
+ * random bits and pieces. Verify that the kernel enforces this.
+ */
+ ASSERT_NE(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ /*
+ * Closing the source detached mount tree must not unmount and
+ * free the shared anonymous mount namespace. If it did, the
+ * kernel would quickly yell at us because the anonymous mount
+ * namespace wouldn't be empty when freed.
+ */
+ EXPECT_EQ(close(fd_tree1), 0);
+
+ /*
+ * Attach the mount tree to a non-anonymous mount namespace.
+ * This can only succeed if closing fd_tree1 had proper
+ * semantics and didn't cause the anonymous mount namespace to
+ * be freed. If it did, this would trigger a UAF which would be
+ * visible on any KASAN-enabled kernel.
+ *
+ * |-/tmp/target1 testing ramfs
+ * `-/tmp/target1 testing tmpfs
+ * `-/tmp/target1/AA testing tmpfs
+ * `-/tmp/target1/AA/B testing tmpfs
+ * `-/tmp/target1/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+ EXPECT_EQ(close(fd_tree2), 0);
+}
+
+TEST_F(mount_setattr, two_detached_mounts_referring_to_same_anonymous_mount_namespace)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * Copy the following mount tree:
+ *
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+ * Create an O_PATH file descriptor with a separate struct file
+ * that refers to the same detached mount tree as @fd_tree1.
+ */
+ fd_tree2 = sys_open_tree(fd_tree1, "",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+ * Copy the following mount tree:
+ *
+ * |-/tmp/target1 testing tmpfs
+ * `-/tmp/target1/AA testing tmpfs
+ * `-/tmp/target1/AA/B testing tmpfs
+ * `-/tmp/target1/AA/B/BB testing tmpfs
+ */
+ ASSERT_EQ(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ /*
+ * This must fail as this would mean adding the same mount tree
+ * into the same mount tree.
+ */
+ ASSERT_NE(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+}
+
+TEST_F(mount_setattr, two_detached_subtrees_of_same_anonymous_mount_namespace)
+{
+ int fd_tree1 = -EBADF, fd_tree2 = -EBADF;
+
+ /*
+ * Copy the following mount tree:
+ *
+ * |-/mnt/A testing tmpfs
+ * `-/mnt/A/AA testing tmpfs
+ * `-/mnt/A/AA/B testing tmpfs
+ * `-/mnt/A/AA/B/BB testing tmpfs
+ */
+ fd_tree1 = sys_open_tree(-EBADF, "/mnt/A",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree1, 0);
+
+ /*
+ * Create an O_PATH file descriptor with a separate struct file that
+ * refers to a subtree of the same detached mount tree as @fd_tree1.
+ */
+ fd_tree2 = sys_open_tree(fd_tree1, "AA",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_EMPTY_PATH | OPEN_TREE_CLOEXEC);
+ ASSERT_GE(fd_tree2, 0);
+
+ /*
+ * This must fail as it is only possible to attach the root of a
+ * detached mount tree.
+ */
+ ASSERT_NE(move_mount(fd_tree2, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+
+ ASSERT_EQ(move_mount(fd_tree1, "", -EBADF, "/tmp/target1", MOVE_MOUNT_F_EMPTY_PATH), 0);
+}
+
+TEST_F(mount_setattr, detached_tree_propagation)
+{
+ int fd_tree = -EBADF;
+ struct statx stx1, stx2, stx3, stx4;
+
+ ASSERT_EQ(unshare(CLONE_NEWNS), 0);
+ ASSERT_EQ(mount(NULL, "/mnt", NULL, MS_REC | MS_SHARED, NULL), 0);
+
+ /*
+ * Copy the following mount tree:
+ *
+ * /mnt testing tmpfs
+ * |-/mnt/A testing tmpfs
+ * | `-/mnt/A/AA testing tmpfs
+ * | `-/mnt/A/AA/B testing tmpfs
+ * | `-/mnt/A/AA/B/BB testing tmpfs
+ * `-/mnt/B testing ramfs
+ */
+ fd_tree = sys_open_tree(-EBADF, "/mnt",
+ AT_NO_AUTOMOUNT | AT_SYMLINK_NOFOLLOW |
+ AT_RECURSIVE | OPEN_TREE_CLOEXEC |
+ OPEN_TREE_CLONE);
+ ASSERT_GE(fd_tree, 0);
+
+ ASSERT_EQ(statx(-EBADF, "/mnt/A", 0, 0, &stx1), 0);
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx2), 0);
+
+ /*
+ * Copying the mount namespace as done above doesn't alter the
+ * mounts in any way, so the filesystem mounted on /mnt must be
+ * identical even though the mounts will differ. Use the device
+ * information to verify that. Note that tmpfs has a major
+ * number of 0, so comparing major numbers would be misleading.
+ */
+ ASSERT_EQ(stx1.stx_dev_minor, stx2.stx_dev_minor);
+
+ /* Mount a tmpfs filesystem over /mnt/A. */
+ ASSERT_EQ(mount(NULL, "/mnt/A", "tmpfs", 0, NULL), 0);
+
+ ASSERT_EQ(statx(-EBADF, "/mnt/A", 0, 0, &stx3), 0);
+ ASSERT_EQ(statx(fd_tree, "A", 0, 0, &stx4), 0);
+
+ /*
+ * A new filesystem has been mounted on top of /mnt/A which
+ * means that the device information will be different for any
+ * statx() that was taken from /mnt/A before the mount compared
+ * to one after the mount.
+ *
+ * Since we already know that the device information of the
+ * stx1 and stx2 samples is identical, we also know that the
+ * stx2 and stx3 device information will necessarily differ.
+ */
+ ASSERT_NE(stx1.stx_dev_minor, stx3.stx_dev_minor);
+
+ /*
+ * If mount propagation worked correctly then the tmpfs mount
+ * that was created after the mount namespace was unshared will
+ * have propagated onto /mnt/A in the detached mount tree.
+ *
+ * Verify that the device information for stx3 and stx4 is
+ * identical. It is already established that stx3 differs from
+ * both stx1 and stx2, which were sampled before the tmpfs
+ * mount, so if stx3 and stx4 are identical the proof is done.
+ */
+ ASSERT_EQ(stx3.stx_dev_minor, stx4.stx_dev_minor);
+
+ EXPECT_EQ(close(fd_tree), 0);
+}
+
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/nolibc/Makefile b/tools/testing/selftests/nolibc/Makefile
index 7d14a7c0cb62..58bcbbd029bc 100644
--- a/tools/testing/selftests/nolibc/Makefile
+++ b/tools/testing/selftests/nolibc/Makefile
@@ -47,6 +47,7 @@ XARCH_riscv = riscv64
XARCH = $(or $(XARCH_$(ARCH)),$(ARCH))
# map from user input variants to their kernel supported architectures
+ARCH_armthumb = arm
ARCH_ppc = powerpc
ARCH_ppc64 = powerpc
ARCH_ppc64le = powerpc
@@ -54,6 +55,7 @@ ARCH_mips32le = mips
ARCH_mips32be = mips
ARCH_riscv32 = riscv
ARCH_riscv64 = riscv
+ARCH_s390x = s390
ARCH := $(or $(ARCH_$(XARCH)),$(XARCH))
# kernel image names by architecture
@@ -62,6 +64,7 @@ IMAGE_x86_64 = arch/x86/boot/bzImage
IMAGE_x86 = arch/x86/boot/bzImage
IMAGE_arm64 = arch/arm64/boot/Image
IMAGE_arm = arch/arm/boot/zImage
+IMAGE_armthumb = arch/arm/boot/zImage
IMAGE_mips32le = vmlinuz
IMAGE_mips32be = vmlinuz
IMAGE_ppc = vmlinux
@@ -70,6 +73,7 @@ IMAGE_ppc64le = arch/powerpc/boot/zImage
IMAGE_riscv = arch/riscv/boot/Image
IMAGE_riscv32 = arch/riscv/boot/Image
IMAGE_riscv64 = arch/riscv/boot/Image
+IMAGE_s390x = arch/s390/boot/bzImage
IMAGE_s390 = arch/s390/boot/bzImage
IMAGE_loongarch = arch/loongarch/boot/vmlinuz.efi
IMAGE = $(objtree)/$(IMAGE_$(XARCH))
@@ -81,19 +85,20 @@ DEFCONFIG_x86_64 = defconfig
DEFCONFIG_x86 = defconfig
DEFCONFIG_arm64 = defconfig
DEFCONFIG_arm = multi_v7_defconfig
+DEFCONFIG_armthumb = multi_v7_defconfig
DEFCONFIG_mips32le = malta_defconfig
-DEFCONFIG_mips32be = malta_defconfig
+DEFCONFIG_mips32be = malta_defconfig generic/eb.config
DEFCONFIG_ppc = pmac32_defconfig
DEFCONFIG_ppc64 = powernv_be_defconfig
DEFCONFIG_ppc64le = powernv_defconfig
DEFCONFIG_riscv = defconfig
DEFCONFIG_riscv32 = rv32_defconfig
DEFCONFIG_riscv64 = defconfig
-DEFCONFIG_s390 = defconfig
+DEFCONFIG_s390x = defconfig
+DEFCONFIG_s390 = defconfig compat.config
DEFCONFIG_loongarch = defconfig
DEFCONFIG = $(DEFCONFIG_$(XARCH))
-EXTRACONFIG_mips32be = -d CONFIG_CPU_LITTLE_ENDIAN -e CONFIG_CPU_BIG_ENDIAN
EXTRACONFIG = $(EXTRACONFIG_$(XARCH))
# optional tests to run (default = all)
@@ -105,6 +110,7 @@ QEMU_ARCH_x86_64 = x86_64
QEMU_ARCH_x86 = x86_64
QEMU_ARCH_arm64 = aarch64
QEMU_ARCH_arm = arm
+QEMU_ARCH_armthumb = arm
QEMU_ARCH_mips32le = mipsel # works with malta_defconfig
QEMU_ARCH_mips32be = mips
QEMU_ARCH_ppc = ppc
@@ -113,6 +119,7 @@ QEMU_ARCH_ppc64le = ppc64
QEMU_ARCH_riscv = riscv64
QEMU_ARCH_riscv32 = riscv32
QEMU_ARCH_riscv64 = riscv64
+QEMU_ARCH_s390x = s390x
QEMU_ARCH_s390 = s390x
QEMU_ARCH_loongarch = loongarch64
QEMU_ARCH = $(QEMU_ARCH_$(XARCH))
@@ -133,6 +140,7 @@ QEMU_ARGS_x86_64 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(
QEMU_ARGS_x86 = -M pc -append "console=ttyS0,9600 i8042.noaux panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_arm64 = -M virt -cpu cortex-a53 -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_arm = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_armthumb = -M virt -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32le = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_mips32be = -M malta -append "panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_ppc = -M g3beige -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
@@ -141,6 +149,7 @@ QEMU_ARGS_ppc64le = -M powernv -append "console=hvc0 panic=-1 $(TEST:%=NOLIBC
QEMU_ARGS_riscv = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_riscv32 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_riscv64 = -M virt -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
+QEMU_ARGS_s390x = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_s390 = -M s390-ccw-virtio -append "console=ttyS0 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS_loongarch = -M virt -append "console=ttyS0,115200 panic=-1 $(TEST:%=NOLIBC_TEST=%)"
QEMU_ARGS = -m 1G $(QEMU_ARGS_$(XARCH)) $(QEMU_ARGS_BIOS) $(QEMU_ARGS_EXTRA)
@@ -156,15 +165,18 @@ Q=@
endif
CFLAGS_i386 = $(call cc-option,-m32)
+CFLAGS_arm = -marm
+CFLAGS_armthumb = -mthumb -march=armv6t2
CFLAGS_ppc = -m32 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
CFLAGS_ppc64 = -m64 -mbig-endian -mno-vsx $(call cc-option,-mmultiple)
CFLAGS_ppc64le = -m64 -mlittle-endian -mno-vsx $(call cc-option,-mabi=elfv2)
-CFLAGS_s390 = -m64
+CFLAGS_s390x = -m64
+CFLAGS_s390 = -m31
CFLAGS_mips32le = -EL -mabi=32 -fPIC
CFLAGS_mips32be = -EB -mabi=32
CFLAGS_STACKPROTECTOR ?= $(call cc-option,-mstack-protector-guard=global $(call cc-option,-fstack-protector-all))
CFLAGS ?= -Os -fno-ident -fno-asynchronous-unwind-tables -std=c89 -W -Wall -Wextra \
- $(call cc-option,-fno-stack-protector) \
+ $(call cc-option,-fno-stack-protector) $(call cc-option,-Wmissing-prototypes) \
$(CFLAGS_$(XARCH)) $(CFLAGS_STACKPROTECTOR) $(CFLAGS_EXTRA)
LDFLAGS :=
@@ -220,7 +232,7 @@ all: run
sysroot: sysroot/$(ARCH)/include
-sysroot/$(ARCH)/include:
+sysroot/$(ARCH)/include: | defconfig
$(Q)rm -rf sysroot/$(ARCH) sysroot/sysroot
$(QUIET_MKDIR)mkdir -p sysroot
$(Q)$(MAKE) -C $(srctree) outputmakefile
@@ -264,16 +276,16 @@ initramfs: nolibc-test
$(Q)cp nolibc-test initramfs/init
defconfig:
- $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) mrproper $(DEFCONFIG) prepare
+ $(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(DEFCONFIG)
$(Q)if [ -n "$(EXTRACONFIG)" ]; then \
$(srctree)/scripts/config --file $(objtree)/.config $(EXTRACONFIG); \
$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) olddefconfig < /dev/null; \
fi
-kernel:
+kernel: | defconfig
$(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) < /dev/null
-kernel-standalone: initramfs
+kernel-standalone: initramfs | defconfig
$(Q)$(MAKE) -C $(srctree) ARCH=$(ARCH) CC=$(CC) CROSS_COMPILE=$(CROSS_COMPILE) $(IMAGE_NAME) CONFIG_INITRAMFS_SOURCE=$(CURDIR)/initramfs < /dev/null
# run the tests after building the kernel
diff --git a/tools/testing/selftests/nolibc/nolibc-test-linkage.c b/tools/testing/selftests/nolibc/nolibc-test-linkage.c
index 5ff4c8a1db2a..a7ca8325863f 100644
--- a/tools/testing/selftests/nolibc/nolibc-test-linkage.c
+++ b/tools/testing/selftests/nolibc/nolibc-test-linkage.c
@@ -11,16 +11,16 @@ void *linkage_test_errno_addr(void)
return &errno;
}
-int linkage_test_constructor_test_value;
+int linkage_test_constructor_test_value = 0;
__attribute__((constructor))
static void constructor1(void)
{
- linkage_test_constructor_test_value = 2;
+ linkage_test_constructor_test_value |= 1 << 0;
}
__attribute__((constructor))
static void constructor2(void)
{
- linkage_test_constructor_test_value *= 3;
+ linkage_test_constructor_test_value |= 1 << 1;
}
diff --git a/tools/testing/selftests/nolibc/nolibc-test.c b/tools/testing/selftests/nolibc/nolibc-test.c
index 0e0e3b48a8c3..5884a891c491 100644
--- a/tools/testing/selftests/nolibc/nolibc-test.c
+++ b/tools/testing/selftests/nolibc/nolibc-test.c
@@ -43,6 +43,8 @@
#endif
#endif
+#pragma GCC diagnostic ignored "-Wmissing-prototypes"
+
#include "nolibc-test-linkage.h"
/* for the type of int_fast16_t and int_fast32_t, musl differs from glibc and nolibc */
@@ -690,14 +692,14 @@ int expect_strtox(int llen, void *func, const char *input, int base, intmax_t ex
__attribute__((constructor))
static void constructor1(void)
{
- constructor_test_value = 1;
+ constructor_test_value |= 1 << 0;
}
__attribute__((constructor))
static void constructor2(int argc, char **argv, char **envp)
{
if (argc && argv && envp)
- constructor_test_value *= 2;
+ constructor_test_value |= 1 << 1;
}
int run_startup(int min, int max)
@@ -736,9 +738,9 @@ int run_startup(int min, int max)
CASE_TEST(environ_HOME); EXPECT_PTRNZ(1, getenv("HOME")); break;
CASE_TEST(auxv_addr); EXPECT_PTRGT(test_auxv != (void *)-1, test_auxv, brk); break;
CASE_TEST(auxv_AT_UID); EXPECT_EQ(1, getauxval(AT_UID), getuid()); break;
- CASE_TEST(constructor); EXPECT_EQ(1, constructor_test_value, 2); break;
+ CASE_TEST(constructor); EXPECT_EQ(is_nolibc, constructor_test_value, 0x3); break;
CASE_TEST(linkage_errno); EXPECT_PTREQ(1, linkage_test_errno_addr(), &errno); break;
- CASE_TEST(linkage_constr); EXPECT_EQ(1, linkage_test_constructor_test_value, 6); break;
+ CASE_TEST(linkage_constr); EXPECT_EQ(1, linkage_test_constructor_test_value, 0x3); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
@@ -767,6 +769,44 @@ int test_getdents64(const char *dir)
return ret;
}
+static int test_dirent(void)
+{
+ int comm = 0, cmdline = 0;
+ struct dirent dirent, *result;
+ DIR *dir;
+ int ret;
+
+ dir = opendir("/proc/self");
+ if (!dir)
+ return 1;
+
+ while (1) {
+ errno = 0;
+ ret = readdir_r(dir, &dirent, &result);
+ if (ret != 0)
+ return 1;
+ if (!result)
+ break;
+
+ if (strcmp(dirent.d_name, "comm") == 0)
+ comm++;
+ else if (strcmp(dirent.d_name, "cmdline") == 0)
+ cmdline++;
+ }
+
+ if (errno)
+ return 1;
+
+ ret = closedir(dir);
+ if (ret)
+ return 1;
+
+ if (comm != 1 || cmdline != 1)
+ return 1;
+
+ return 0;
+}
+
int test_getpagesize(void)
{
int x = getpagesize();
@@ -988,6 +1028,22 @@ int test_rlimit(void)
return 0;
}
+int test_openat(void)
+{
+ int dev, null;
+
+ dev = openat(AT_FDCWD, "/dev", O_DIRECTORY);
+ if (dev < 0)
+ return -1;
+
+ null = openat(dev, "null", O_RDONLY);
+ close(dev);
+ if (null < 0)
+ return -1;
+
+ close(null);
+ return 0;
+}
/* Run syscall tests between IDs <min> and <max>.
* Return 0 on success, non-zero on failure.
@@ -1059,6 +1115,7 @@ int run_syscall(int min, int max)
CASE_TEST(fork); EXPECT_SYSZR(1, test_fork()); break;
CASE_TEST(getdents64_root); EXPECT_SYSNE(1, test_getdents64("/"), -1); break;
CASE_TEST(getdents64_null); EXPECT_SYSER(1, test_getdents64("/dev/null"), -1, ENOTDIR); break;
+ CASE_TEST(directories); EXPECT_SYSZR(proc, test_dirent()); break;
CASE_TEST(gettimeofday_tv); EXPECT_SYSZR(1, gettimeofday(&tv, NULL)); break;
CASE_TEST(gettimeofday_tv_tz);EXPECT_SYSZR(1, gettimeofday(&tv, &tz)); break;
CASE_TEST(getpagesize); EXPECT_SYSZR(1, test_getpagesize()); break;
@@ -1073,8 +1130,9 @@ int run_syscall(int min, int max)
CASE_TEST(mmap_bad); EXPECT_PTRER(1, mmap(NULL, 0, PROT_READ, MAP_PRIVATE, 0, 0), MAP_FAILED, EINVAL); break;
CASE_TEST(munmap_bad); EXPECT_SYSER(1, munmap(NULL, 0), -1, EINVAL); break;
CASE_TEST(mmap_munmap_good); EXPECT_SYSZR(1, test_mmap_munmap()); break;
- CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", 0), -1); if (tmp != -1) close(tmp); break;
- CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", 0), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(open_tty); EXPECT_SYSNE(1, tmp = open("/dev/null", O_RDONLY), -1); if (tmp != -1) close(tmp); break;
+ CASE_TEST(open_blah); EXPECT_SYSER(1, tmp = open("/proc/self/blah", O_RDONLY), -1, ENOENT); if (tmp != -1) close(tmp); break;
+ CASE_TEST(openat_dir); EXPECT_SYSZR(1, test_openat()); break;
CASE_TEST(pipe); EXPECT_SYSZR(1, test_pipe()); break;
CASE_TEST(poll_null); EXPECT_SYSZR(1, poll(NULL, 0, 0)); break;
CASE_TEST(poll_stdout); EXPECT_SYSNE(1, ({ struct pollfd fds = { 1, POLLOUT, 0}; poll(&fds, 1, 0); }), -1); break;
@@ -1284,6 +1342,73 @@ static int expect_vfprintf(int llen, int c, const char *expected, const char *fm
return ret;
}
+static int test_scanf(void)
+{
+ unsigned long long ull;
+ unsigned long ul;
+ unsigned int u;
+ long long ll;
+ long l;
+ void *p;
+ int i;
+
+ /* return __LINE__ to point to the specific failure */
+
+ /* test EOF */
+ if (sscanf("", "foo") != EOF)
+ return __LINE__;
+
+ /* test simple literal without placeholder */
+ if (sscanf("foo", "foo") != 0)
+ return __LINE__;
+
+ /* test single placeholder */
+ if (sscanf("123", "%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 123)
+ return __LINE__;
+
+ /* test multiple placeholders and separators */
+ if (sscanf("a123b456c0x90", "a%db%uc%p", &i, &u, &p) != 3)
+ return __LINE__;
+
+ if (i != 123)
+ return __LINE__;
+
+ if (u != 456)
+ return __LINE__;
+
+ if (p != (void *)0x90)
+ return __LINE__;
+
+ /* test space handling */
+ if (sscanf("a b1", "a b%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 1)
+ return __LINE__;
+
+ /* test literal percent */
+ if (sscanf("a%1", "a%%%d", &i) != 1)
+ return __LINE__;
+
+ if (i != 1)
+ return __LINE__;
+
+ /* test stdint.h types */
+ if (sscanf("1|2|3|4|5|6",
+ "%d|%ld|%lld|%u|%lu|%llu",
+ &i, &l, &ll, &u, &ul, &ull) != 6)
+ return __LINE__;
+
+ if (i != 1 || l != 2 || ll != 3 ||
+ u != 4 || ul != 5 || ull != 6)
+ return __LINE__;
+
+ return 0;
+}
+
static int run_vfprintf(int min, int max)
{
int test;
@@ -1305,6 +1430,7 @@ static int run_vfprintf(int min, int max)
CASE_TEST(char); EXPECT_VFPRINTF(1, "c", "%c", 'c'); break;
CASE_TEST(hex); EXPECT_VFPRINTF(1, "f", "%x", 0xf); break;
CASE_TEST(pointer); EXPECT_VFPRINTF(3, "0x1", "%p", (void *) 0x1); break;
+ CASE_TEST(scanf); EXPECT_ZR(1, test_scanf()); break;
case __LINE__:
return ret; /* must be last */
/* note: do not set any defaults so as to permit holes above */
diff --git a/tools/testing/selftests/nolibc/run-tests.sh b/tools/testing/selftests/nolibc/run-tests.sh
index 9c5160c53881..0299a0912d40 100755
--- a/tools/testing/selftests/nolibc/run-tests.sh
+++ b/tools/testing/selftests/nolibc/run-tests.sh
@@ -17,7 +17,16 @@ perform_download=0
test_mode=system
werror=1
llvm=
-archs="i386 x86_64 arm64 arm mips32le mips32be ppc ppc64 ppc64le riscv32 riscv64 s390 loongarch"
+all_archs=(
+ i386 x86_64
+ arm64 arm armthumb
+ mips32le mips32be
+ ppc ppc64 ppc64le
+ riscv32 riscv64
+ s390x s390
+ loongarch
+)
+archs="${all_archs[@]}"
TEMP=$(getopt -o 'j:d:c:b:a:m:pelh' -n "$0" -- "$@")
@@ -94,19 +103,21 @@ fi
crosstool_arch() {
case "$1" in
arm64) echo aarch64;;
+ armthumb) echo arm;;
ppc) echo powerpc;;
ppc64) echo powerpc64;;
ppc64le) echo powerpc64;;
riscv) echo riscv64;;
loongarch) echo loongarch64;;
mips*) echo mips;;
+ s390*) echo s390;;
*) echo "$1";;
esac
}
crosstool_abi() {
case "$1" in
- arm) echo linux-gnueabi;;
+ arm | armthumb) echo linux-gnueabi;;
*) echo linux;;
esac
}
@@ -157,10 +168,6 @@ test_arch() {
fi
MAKE=(make -j"${nproc}" XARCH="${arch}" CROSS_COMPILE="${cross_compile}" LLVM="${llvm}" O="${build_dir}")
- mkdir -p "$build_dir"
- if [ "$test_mode" = "system" ] && [ ! -f "${build_dir}/.config" ]; then
- swallow_output "${MAKE[@]}" defconfig
- fi
case "$test_mode" in
'system')
test_target=run
@@ -173,6 +180,13 @@ test_arch() {
exit 1
esac
printf '%-15s' "$arch:"
+ if [ "$arch" = "s390" ] && ([ "$llvm" = "1" ] || [ "$test_mode" = "user" ]); then
+ echo "Unsupported configuration"
+ return
+ fi
+
+ mkdir -p "$build_dir"
+ swallow_output "${MAKE[@]}" defconfig
swallow_output "${MAKE[@]}" CFLAGS_EXTRA="$CFLAGS_EXTRA" "$test_target" V=1
cp run.out run.out."${arch}"
"${MAKE[@]}" report | grep passed
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index bf92481f925c..0406a065deb4 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -8,3 +8,5 @@ pidfd_getfd_test
pidfd_setns_test
pidfd_file_handle_test
pidfd_bind_mount
+pidfd_info_test
+pidfd_exec_helper
diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile
index 301343a11b62..fcbefc0d77f6 100644
--- a/tools/testing/selftests/pidfd/Makefile
+++ b/tools/testing/selftests/pidfd/Makefile
@@ -3,7 +3,9 @@ CFLAGS += -g $(KHDR_INCLUDES) -pthread -Wall
TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \
pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test \
- pidfd_file_handle_test pidfd_bind_mount
+ pidfd_file_handle_test pidfd_bind_mount pidfd_info_test
+
+TEST_GEN_PROGS_EXTENDED := pidfd_exec_helper
include ../lib.mk
diff --git a/tools/testing/selftests/pidfd/pidfd.h b/tools/testing/selftests/pidfd/pidfd.h
index 0b96ac4b8ce5..cec22aa11cdf 100644
--- a/tools/testing/selftests/pidfd/pidfd.h
+++ b/tools/testing/selftests/pidfd/pidfd.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <string.h>
#include <syscall.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -50,6 +51,107 @@
#define PIDFD_NONBLOCK O_NONBLOCK
#endif
+#ifndef PIDFD_SELF_THREAD
+#define PIDFD_SELF_THREAD -10000 /* Current thread. */
+#endif
+
+#ifndef PIDFD_SELF_THREAD_GROUP
+#define PIDFD_SELF_THREAD_GROUP -20000 /* Current thread group leader. */
+#endif
+
+#ifndef PIDFD_SELF
+#define PIDFD_SELF PIDFD_SELF_THREAD
+#endif
+
+#ifndef PIDFD_SELF_PROCESS
+#define PIDFD_SELF_PROCESS PIDFD_SELF_THREAD_GROUP
+#endif
+
+#ifndef PIDFS_IOCTL_MAGIC
+#define PIDFS_IOCTL_MAGIC 0xFF
+#endif
+
+#ifndef PIDFD_GET_CGROUP_NAMESPACE
+#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
+#endif
+
+#ifndef PIDFD_GET_IPC_NAMESPACE
+#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
+#endif
+
+#ifndef PIDFD_GET_MNT_NAMESPACE
+#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
+#endif
+
+#ifndef PIDFD_GET_NET_NAMESPACE
+#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
+#endif
+
+#ifndef PIDFD_GET_PID_NAMESPACE
+#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
+#endif
+
+#ifndef PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
+#endif
+
+#ifndef PIDFD_GET_TIME_NAMESPACE
+#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
+#endif
+
+#ifndef PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE
+#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
+#endif
+
+#ifndef PIDFD_GET_USER_NAMESPACE
+#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
+#endif
+
+#ifndef PIDFD_GET_UTS_NAMESPACE
+#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
+#endif
+
+#ifndef PIDFD_GET_INFO
+#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info)
+#endif
+
+#ifndef PIDFD_INFO_PID
+#define PIDFD_INFO_PID (1UL << 0) /* Always returned, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_CREDS
+#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_CGROUPID
+#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */
+#endif
+
+#ifndef PIDFD_INFO_EXIT
+#define PIDFD_INFO_EXIT (1UL << 3) /* Always returned if available, even if not requested */
+#endif
+
+#ifndef PIDFD_THREAD
+#define PIDFD_THREAD O_EXCL
+#endif
+
+struct pidfd_info {
+ __u64 mask;
+ __u64 cgroupid;
+ __u32 pid;
+ __u32 tgid;
+ __u32 ppid;
+ __u32 ruid;
+ __u32 rgid;
+ __u32 euid;
+ __u32 egid;
+ __u32 suid;
+ __u32 sgid;
+ __u32 fsuid;
+ __u32 fsgid;
+ __s32 exit_code;
+};
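+
+/*
+ * Typical PIDFD_GET_INFO usage (sketch): request fields via ->mask,
+ * issue the ioctl, then check which bits the kernel set on return
+ * before trusting the corresponding fields:
+ *
+ *	struct pidfd_info info = { .mask = PIDFD_INFO_EXIT };
+ *
+ *	if (ioctl(pidfd, PIDFD_GET_INFO, &info) == 0 &&
+ *	    (info.mask & PIDFD_INFO_EXIT) && WIFEXITED(info.exit_code))
+ *		printf("exit status: %d\n", WEXITSTATUS(info.exit_code));
+ */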
+
/*
* The kernel reserves 300 pids via RESERVED_PIDS in kernel/pid.c
* That means, when it wraps around any pid < 300 will be skipped.
@@ -152,4 +254,11 @@ static inline ssize_t write_nointr(int fd, const void *buf, size_t count)
return ret;
}
+static inline int sys_execveat(int dirfd, const char *pathname,
+ char *const argv[], char *const envp[],
+ int flags)
+{
+ return syscall(__NR_execveat, dirfd, pathname, argv, envp, flags);
+}
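+
+/*
+ * Sketch of a conventional sys_execveat() invocation (the callers in
+ * these tests pass NULL argv/envp, which Linux accepts; the argv below
+ * is purely illustrative):
+ *
+ *	char *const argv[] = { (char *)"pidfd_exec_helper", NULL };
+ *
+ *	if (sys_execveat(AT_FDCWD, "pidfd_exec_helper", argv, NULL, 0) < 0)
+ *		perror("execveat");
+ */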
+
#endif /* __PIDFD_H */
diff --git a/tools/testing/selftests/pidfd/pidfd_exec_helper.c b/tools/testing/selftests/pidfd/pidfd_exec_helper.c
new file mode 100644
index 000000000000..5516808c95f2
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_exec_helper.c
@@ -0,0 +1,12 @@
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int main(int argc, char *argv[])
+{
+ if (pause())
+ _exit(EXIT_FAILURE);
+
+ _exit(EXIT_SUCCESS);
+}
diff --git a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
index f062a986e382..f718aac75068 100644
--- a/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_fdinfo_test.c
@@ -13,6 +13,7 @@
#include <syscall.h>
#include <sys/wait.h>
#include <sys/mman.h>
+#include <sys/mount.h>
#include "pidfd.h"
#include "../kselftest.h"
diff --git a/tools/testing/selftests/pidfd/pidfd_info_test.c b/tools/testing/selftests/pidfd/pidfd_info_test.c
new file mode 100644
index 000000000000..1758a1b0457b
--- /dev/null
+++ b/tools/testing/selftests/pidfd/pidfd_info_test.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/types.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syscall.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/kcmp.h>
+#include <sys/stat.h>
+
+#include "pidfd.h"
+#include "../kselftest_harness.h"
+
+FIXTURE(pidfd_info)
+{
+ pid_t child_pid1;
+ int child_pidfd1;
+
+ pid_t child_pid2;
+ int child_pidfd2;
+
+ pid_t child_pid3;
+ int child_pidfd3;
+
+ pid_t child_pid4;
+ int child_pidfd4;
+};
+
+FIXTURE_SETUP(pidfd_info)
+{
+ int ret;
+ int ipc_sockets[2];
+ char c;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid1 = create_child(&self->child_pidfd1, 0);
+ EXPECT_GE(self->child_pid1, 0);
+
+ if (self->child_pid1 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* SIGKILL but don't reap. */
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd1, SIGKILL, NULL, 0), 0);
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ self->child_pid2 = create_child(&self->child_pidfd2, 0);
+ EXPECT_GE(self->child_pid2, 0);
+
+ if (self->child_pid2 == 0) {
+ close(ipc_sockets[0]);
+
+ if (write_nointr(ipc_sockets[1], "1", 1) < 0)
+ _exit(EXIT_FAILURE);
+
+ close(ipc_sockets[1]);
+
+ pause();
+ _exit(EXIT_SUCCESS);
+ }
+
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &c, 1), 1);
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* SIGKILL and reap. */
+ EXPECT_EQ(sys_pidfd_send_signal(self->child_pidfd2, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid2, NULL, WEXITED), 0);
+
+ self->child_pid3 = create_child(&self->child_pidfd3, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid3, 0);
+
+ if (self->child_pid3 == 0)
+ _exit(EXIT_SUCCESS);
+
+ self->child_pid4 = create_child(&self->child_pidfd4, CLONE_NEWUSER | CLONE_NEWPID);
+ EXPECT_GE(self->child_pid4, 0);
+
+ if (self->child_pid4 == 0)
+ _exit(EXIT_SUCCESS);
+
+ EXPECT_EQ(sys_waitid(P_PID, self->child_pid4, NULL, WEXITED), 0);
+}
+
+FIXTURE_TEARDOWN(pidfd_info)
+{
+ sys_pidfd_send_signal(self->child_pidfd1, SIGKILL, NULL, 0);
+ if (self->child_pidfd1 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd1));
+
+ sys_waitid(P_PID, self->child_pid1, NULL, WEXITED);
+
+ sys_pidfd_send_signal(self->child_pidfd2, SIGKILL, NULL, 0);
+ if (self->child_pidfd2 >= 0)
+ EXPECT_EQ(0, close(self->child_pidfd2));
+
+ sys_waitid(P_PID, self->child_pid2, NULL, WEXITED);
+ sys_waitid(P_PID, self->child_pid3, NULL, WEXITED);
+ sys_waitid(P_PID, self->child_pid4, NULL, WEXITED);
+}
+
+TEST_F(pidfd_info, sigkill_exit)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has exited but not been reaped so this must work. */
+ ASSERT_EQ(ioctl(self->child_pidfd1, PIDFD_GET_INFO, &info), 0);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd1, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+}
+
+TEST_F(pidfd_info, sigkill_reaped)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has already been reaped and PIDFD_INFO_EXIT hasn't been set. */
+ ASSERT_NE(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_EQ(errno, ESRCH);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+}
+
+TEST_F(pidfd_info, success_exit)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has exited but not been reaped so this must work. */
+ ASSERT_EQ(ioctl(self->child_pidfd3, PIDFD_GET_INFO, &info), 0);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd3, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+}
+
+TEST_F(pidfd_info, success_reaped)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID,
+ };
+
+ /* Process has already been reaped and PIDFD_INFO_EXIT hasn't been set. */
+ ASSERT_NE(ioctl(self->child_pidfd4, PIDFD_GET_INFO, &info), 0);
+ ASSERT_EQ(errno, ESRCH);
+
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(self->child_pidfd4, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+}
+
+TEST_F(pidfd_info, success_reaped_poll)
+{
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+ struct pollfd fds = {};
+ int nevents;
+
+ fds.events = POLLIN;
+ fds.fd = self->child_pidfd2;
+
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ ASSERT_EQ(ioctl(self->child_pidfd2, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+}
+
+static void *pidfd_info_pause_thread(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+ /* Sleep until we're killed. */
+ pause();
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_thread, pidfd_leader_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ }, info2;
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_pause_thread, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /* Make the thread-group leader exit prematurely. */
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /*
+ * Opening a PIDFD_THREAD aka thread-specific pidfd based on a
+ * thread-group leader must succeed.
+ */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * We can't poll and wait for the old thread-group
+ * leader to exit using a thread-specific pidfd. The
+ * thread-group leader exited prematurely and
+ * notification is delayed until all subthreads have
+ * exited.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ /* Opening a thread as a thread-group leader must fail. */
+ pidfd_thread = sys_pidfd_open(pid_thread, 0);
+ ASSERT_LT(pidfd_thread, 0);
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /*
+ * Note that pidfd_leader is a thread-group pidfd, so polling on it
+ * would only notify us once all threads in the thread-group have
+ * exited. So we can't poll before we have taken down the whole
+ * thread-group.
+ */
+
+ /* Get PIDFD_GET_INFO using the thread-group leader pidfd. */
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /*
+ * Now retrieve the same info using the thread-specific pidfd
+ * for the thread-group leader.
+ */
+ info2.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader_thread, PIDFD_GET_INFO, &info2), 0);
+ ASSERT_TRUE(!!(info2.mask & PIDFD_INFO_CREDS));
+ /* Process has exited but not been reaped, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info2.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info2.pid, pid_leader);
+
+ /* Now try the thread-specific pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ /* The thread hasn't exited, so no PIDFD_INFO_EXIT information yet. */
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_thread);
+
+ /*
+ * Take down the whole thread-group. The thread-group leader
+ * exited successfully but the thread will now be SIGKILLed.
+ * This must be reflected in the recorded exit information.
+ */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ /* The thread-group leader has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /*
+ * Retrieve exit information for the thread-group leader via the
+ * thread-group leader pidfd.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * Retrieve exit information for the thread-group leader via the
+ * thread-specific pidfd.
+ */
+ info2.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader_thread, PIDFD_GET_INFO, &info2), 0);
+ ASSERT_FALSE(!!(info2.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info2.mask & PIDFD_INFO_EXIT));
+
+ /* The thread-group leader exited successfully. Only the specific thread was SIGKILLed. */
+ ASSERT_TRUE(WIFEXITED(info2.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info2.exit_code), 0);
+
+ /* Retrieve exit information for the thread. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ /* The thread got SIGKILLed. */
+ ASSERT_TRUE(WIFSIGNALED(info.exit_code));
+ ASSERT_EQ(WTERMSIG(info.exit_code), SIGKILL);
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+static void *pidfd_info_thread_exec(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ if (read_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+ sys_execveat(AT_FDCWD, "pidfd_exec_helper", NULL, NULL, 0);
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group_exec)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_leader_thread, pidfd_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_thread_exec, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /* Make the thread-group leader exit prematurely. */
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /* Open a thread-specific pidfd for the thread-group leader. */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * We can't poll and wait for the old thread-group
+ * leader to exit using a thread-specific pidfd. The
+ * thread-group leader exited prematurely and
+ * notification is delayed until all subthreads have
+ * exited.
+ *
+ * When the thread has execed it will have taken over the old
+ * thread-group leader's struct pid. Calling poll after
+ * the thread has execed will thus block again because a new
+ * thread-group has started.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ /* Now that we've opened a thread-specific pidfd the thread can exec. */
+ ASSERT_EQ(write_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /* Wait until the kernel has SIGKILLed the thread. */
+ fds.events = POLLHUP;
+ fds.fd = pidfd_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ /* The thread has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /* Retrieve thread-specific exit info from pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /*
+ * While the kernel will have SIGKILLed the whole thread-group
+ * during exec, it will cause the individual threads to exit
+ * cleanly.
+ */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * The thread-group leader is still alive; the thread has taken
+ * over its struct pid and thus its pid number.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /* Take down the thread-group leader. */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+
+ /*
+ * After the exec we're dealing with an empty thread-group, so now
+ * we must see an exit notification on the thread-specific pidfd
+ * for the thread-group leader as there's no subthread that can
+ * revive the struct pid.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_FALSE(!!(fds.revents & POLLHUP));
+
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ /* Retrieve exit information for the thread-group leader. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+static void *pidfd_info_thread_exec_sane(void *arg)
+{
+ pid_t pid_thread = gettid();
+ int ipc_socket = *(int *)arg;
+
+ /* Inform the grand-parent what the tid of this thread is. */
+ if (write_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ if (read_nointr(ipc_socket, &pid_thread, sizeof(pid_thread)) != sizeof(pid_thread))
+ return NULL;
+
+ close(ipc_socket);
+
+ sys_execveat(AT_FDCWD, "pidfd_exec_helper", NULL, NULL, 0);
+ return NULL;
+}
+
+TEST_F(pidfd_info, thread_group_exec_thread)
+{
+ pid_t pid_leader, pid_poller, pid_thread;
+ pthread_t thread;
+ int nevents, pidfd_leader, pidfd_leader_thread, pidfd_thread, ret;
+ int ipc_sockets[2];
+ struct pollfd fds = {};
+ struct pidfd_info info = {
+ .mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT,
+ };
+
+ ret = socketpair(AF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
+ EXPECT_EQ(ret, 0);
+
+ pid_leader = create_child(&pidfd_leader, 0);
+ EXPECT_GE(pid_leader, 0);
+
+ if (pid_leader == 0) {
+ close(ipc_sockets[0]);
+
+ /* The thread will outlive the thread-group leader. */
+ if (pthread_create(&thread, NULL, pidfd_info_thread_exec_sane, &ipc_sockets[1]))
+ syscall(__NR_exit, EXIT_FAILURE);
+
+ /*
+ * Pause the thread-group leader. It will be killed once
+ * the subthread execs.
+ */
+ pause();
+ syscall(__NR_exit, EXIT_SUCCESS);
+ }
+
+ /* Retrieve the tid of the thread. */
+ EXPECT_EQ(close(ipc_sockets[1]), 0);
+ ASSERT_EQ(read_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+
+ /* Opening a thread as a PIDFD_THREAD must succeed. */
+ pidfd_thread = sys_pidfd_open(pid_thread, PIDFD_THREAD);
+ ASSERT_GE(pidfd_thread, 0);
+
+ /* Open a thread-specific pidfd for the thread-group leader. */
+ pidfd_leader_thread = sys_pidfd_open(pid_leader, PIDFD_THREAD);
+ ASSERT_GE(pidfd_leader_thread, 0);
+
+ pid_poller = fork();
+ ASSERT_GE(pid_poller, 0);
+ if (pid_poller == 0) {
+ /*
+ * The subthread will now exec. The struct pid of the old
+ * thread-group leader will be assumed by the subthread which
+ * becomes the new thread-group leader. So no exit notification
+ * must be generated. Wait for 10 seconds and call it a success
+ * if no notification has been received.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, 10000 /* wait 10 seconds */);
+ if (nevents != 0)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLIN)
+ _exit(EXIT_FAILURE);
+ if (fds.revents & POLLHUP)
+ _exit(EXIT_FAILURE);
+ _exit(EXIT_SUCCESS);
+ }
+
+ /* Now that we've opened a thread-specific pidfd the thread can exec. */
+ ASSERT_EQ(write_nointr(ipc_sockets[0], &pid_thread, sizeof(pid_thread)), sizeof(pid_thread));
+ EXPECT_EQ(close(ipc_sockets[0]), 0);
+ ASSERT_EQ(wait_for_pid(pid_poller), 0);
+
+ /* Wait until the kernel has SIGKILLed the thread. */
+ fds.events = POLLHUP;
+ fds.fd = pidfd_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ /* The thread has been reaped. */
+ ASSERT_TRUE(!!(fds.revents & POLLHUP));
+
+ /* Retrieve thread-specific exit info from pidfd. */
+ ASSERT_EQ(ioctl(pidfd_thread, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+ /*
+ * Although the kernel SIGKILLs the whole thread-group during
+ * exec, it causes the individual threads to exit cleanly, so
+ * the recorded exit status is 0.
+ */
+ ASSERT_TRUE(WIFEXITED(info.exit_code));
+ ASSERT_EQ(WEXITSTATUS(info.exit_code), 0);
+
+ /*
+ * The thread-group leader is still alive, the thread has taken
+ * over its struct pid and thus its pid number.
+ */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_EXIT));
+ ASSERT_EQ(info.pid, pid_leader);
+
+ /* Take down the thread-group leader. */
+ EXPECT_EQ(sys_pidfd_send_signal(pidfd_leader, SIGKILL, NULL, 0), 0);
+
+ /*
+ * After the exec we're dealing with an empty thread-group so now
+ * we must see an exit notification on the thread-specific pidfd
+ * for the thread-group leader as there's no subthread that can
+ * revive the struct pid.
+ */
+ fds.events = POLLIN;
+ fds.fd = pidfd_leader_thread;
+ nevents = poll(&fds, 1, -1);
+ ASSERT_EQ(nevents, 1);
+ ASSERT_TRUE(!!(fds.revents & POLLIN));
+ ASSERT_FALSE(!!(fds.revents & POLLHUP));
+
+ EXPECT_EQ(sys_waitid(P_PIDFD, pidfd_leader, NULL, WEXITED), 0);
+
+ /* Retrieve exit information for the thread-group leader. */
+ info.mask = PIDFD_INFO_CGROUPID | PIDFD_INFO_EXIT;
+ ASSERT_EQ(ioctl(pidfd_leader, PIDFD_GET_INFO, &info), 0);
+ ASSERT_FALSE(!!(info.mask & PIDFD_INFO_CREDS));
+ ASSERT_TRUE(!!(info.mask & PIDFD_INFO_EXIT));
+
+ EXPECT_EQ(close(pidfd_leader), 0);
+ EXPECT_EQ(close(pidfd_thread), 0);
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c
index ce413a221bac..cd3de40e4977 100644
--- a/tools/testing/selftests/pidfd/pidfd_open_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_open_test.c
@@ -22,32 +22,6 @@
#include "pidfd.h"
#include "../kselftest.h"
-#ifndef PIDFS_IOCTL_MAGIC
-#define PIDFS_IOCTL_MAGIC 0xFF
-#endif
-
-#ifndef PIDFD_GET_INFO
-#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info)
-#define PIDFD_INFO_CGROUPID (1UL << 0)
-
-struct pidfd_info {
- __u64 request_mask;
- __u64 cgroupid;
- __u32 pid;
- __u32 tgid;
- __u32 ppid;
- __u32 ruid;
- __u32 rgid;
- __u32 euid;
- __u32 egid;
- __u32 suid;
- __u32 sgid;
- __u32 fsuid;
- __u32 fsgid;
- __u32 spare0[1];
-};
-#endif
-
static int safe_int(const char *numstr, int *converted)
{
char *err = NULL;
@@ -148,7 +122,7 @@ out:
int main(int argc, char **argv)
{
struct pidfd_info info = {
- .request_mask = PIDFD_INFO_CGROUPID,
+ .mask = PIDFD_INFO_CGROUPID,
};
int pidfd = -1, ret = 1;
pid_t pid;
@@ -227,7 +201,7 @@ int main(int argc, char **argv)
getegid(), info.sgid);
goto on_error;
}
- if ((info.request_mask & PIDFD_INFO_CGROUPID) && info.cgroupid == 0) {
+ if ((info.mask & PIDFD_INFO_CGROUPID) && info.cgroupid == 0) {
ksft_print_msg("cgroupid should not be 0 when PIDFD_INFO_CGROUPID is set\n");
goto on_error;
}
diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c
index 222f8131283b..e6a079b3d5e2 100644
--- a/tools/testing/selftests/pidfd/pidfd_setns_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c
@@ -16,55 +16,10 @@
#include <unistd.h>
#include <sys/socket.h>
#include <sys/stat.h>
-#include <linux/ioctl.h>
#include "pidfd.h"
#include "../kselftest_harness.h"
-#ifndef PIDFS_IOCTL_MAGIC
-#define PIDFS_IOCTL_MAGIC 0xFF
-#endif
-
-#ifndef PIDFD_GET_CGROUP_NAMESPACE
-#define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1)
-#endif
-
-#ifndef PIDFD_GET_IPC_NAMESPACE
-#define PIDFD_GET_IPC_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 2)
-#endif
-
-#ifndef PIDFD_GET_MNT_NAMESPACE
-#define PIDFD_GET_MNT_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 3)
-#endif
-
-#ifndef PIDFD_GET_NET_NAMESPACE
-#define PIDFD_GET_NET_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 4)
-#endif
-
-#ifndef PIDFD_GET_PID_NAMESPACE
-#define PIDFD_GET_PID_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 5)
-#endif
-
-#ifndef PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE
-#define PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 6)
-#endif
-
-#ifndef PIDFD_GET_TIME_NAMESPACE
-#define PIDFD_GET_TIME_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 7)
-#endif
-
-#ifndef PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE
-#define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8)
-#endif
-
-#ifndef PIDFD_GET_USER_NAMESPACE
-#define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9)
-#endif
-
-#ifndef PIDFD_GET_UTS_NAMESPACE
-#define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10)
-#endif
-
enum {
PIDFD_NS_USER,
PIDFD_NS_MNT,
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index e9728e86b4f2..fcd85cad9f18 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -42,12 +42,41 @@ static pid_t pidfd_clone(int flags, int *pidfd, int (*fn)(void *))
#endif
}
-static int signal_received;
+static pthread_t signal_received;
static void set_signal_received_on_sigusr1(int sig)
{
if (sig == SIGUSR1)
- signal_received = 1;
+ signal_received = pthread_self();
+}
+
+static int send_signal(int pidfd)
+{
+ int ret = 0;
+
+ if (sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
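+ /*
+ * The handler records the ID of the thread it ran in; verify the
+ * signal was delivered to this thread.
+ */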
+ if (signal_received != pthread_self()) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ signal_received = 0;
+ return ret;
+}
+
+static void *send_signal_worker(void *arg)
+{
+ int pidfd = (int)(intptr_t)arg;
+ int ret;
+
+ /* We forward any errors for the caller to handle. */
+ ret = send_signal(pidfd);
+ return (void *)(intptr_t)ret;
}
/*
@@ -56,8 +85,11 @@ static void set_signal_received_on_sigusr1(int sig)
*/
static int test_pidfd_send_signal_simple_success(void)
{
- int pidfd, ret;
+ int pidfd;
const char *test_name = "pidfd_send_signal send SIGUSR1";
+ pthread_t thread;
+ void *thread_res;
+ int err;
if (!have_pidfd_send_signal) {
ksft_test_result_skip(
@@ -66,25 +98,45 @@ static int test_pidfd_send_signal_simple_success(void)
return 0;
}
+ signal(SIGUSR1, set_signal_received_on_sigusr1);
+
+ /* Try sending a signal to ourselves via /proc/self. */
pidfd = open("/proc/self", O_DIRECTORY | O_CLOEXEC);
if (pidfd < 0)
ksft_exit_fail_msg(
"%s test: Failed to open process file descriptor\n",
test_name);
+ err = send_signal(pidfd);
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on sending pidfd signal\n",
+ test_name, err);
+ close(pidfd);
- signal(SIGUSR1, set_signal_received_on_sigusr1);
+ /* Now try the same thing only using PIDFD_SELF_THREAD_GROUP. */
+ err = send_signal(PIDFD_SELF_THREAD_GROUP);
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on PIDFD_SELF_THREAD_GROUP signal\n",
+ test_name, err);
- ret = sys_pidfd_send_signal(pidfd, SIGUSR1, NULL, 0);
- close(pidfd);
- if (ret < 0)
- ksft_exit_fail_msg("%s test: Failed to send signal\n",
+ /*
+ * Now try the same thing from a worker thread and assert that the
+ * signal was delivered to that worker thread.
+ */
+ if (pthread_create(&thread, NULL, send_signal_worker,
+ (void *)(intptr_t)PIDFD_SELF_THREAD))
+ ksft_exit_fail_msg("%s test: Failed to create thread\n",
test_name);
-
- if (signal_received != 1)
- ksft_exit_fail_msg("%s test: Failed to receive signal\n",
+ if (pthread_join(thread, &thread_res))
+ ksft_exit_fail_msg("%s test: Failed to join thread\n",
test_name);
+ err = (int)(intptr_t)thread_res;
+ if (err)
+ ksft_exit_fail_msg(
+ "%s test: Error %d on PIDFD_SELF_THREAD signal\n",
+ test_name, err);
- signal_received = 0;
ksft_test_result_pass("%s test: Sent signal\n", test_name);
return 0;
}
diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h
index 3a0129467de6..d6deb6ffa1b9 100644
--- a/tools/testing/selftests/powerpc/include/pkeys.h
+++ b/tools/testing/selftests/powerpc/include/pkeys.h
@@ -24,6 +24,9 @@
#undef PKEY_DISABLE_EXECUTE
#define PKEY_DISABLE_EXECUTE 0x4
+#undef PKEY_UNRESTRICTED
+#define PKEY_UNRESTRICTED 0x0
+
/* Older versions of libc do not define this */
#ifndef SEGV_PKUERR
#define SEGV_PKUERR 4
@@ -93,7 +96,7 @@ int pkeys_unsupported(void)
SKIP_IF(!hash_mmu);
/* Check if the system call is supported */
- pkey = sys_pkey_alloc(0, 0);
+ pkey = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
SKIP_IF(pkey < 0);
sys_pkey_free(pkey);
diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
index 0af4f02669a1..29b91b7456eb 100644
--- a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
+++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
@@ -72,7 +72,7 @@ static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
switch (fault_type) {
case PKEY_DISABLE_ACCESS:
- pkey_set_rights(fault_pkey, 0);
+ pkey_set_rights(fault_pkey, PKEY_UNRESTRICTED);
break;
case PKEY_DISABLE_EXECUTE:
/*
diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
index 2db76e56d4cb..e89a164c686b 100644
--- a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
+++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c
@@ -83,7 +83,7 @@ static void segv_handler(int signum, siginfo_t *sinfo, void *ctx)
mprotect(pgstart, pgsize, PROT_EXEC))
_exit(1);
else
- pkey_set_rights(pkey, 0);
+ pkey_set_rights(pkey, PKEY_UNRESTRICTED);
fault_count++;
}
diff --git a/tools/testing/selftests/powerpc/ptrace/core-pkey.c b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
index f061434af452..7ff53caeb4aa 100644
--- a/tools/testing/selftests/powerpc/ptrace/core-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
@@ -95,16 +95,16 @@ static int child(struct shared_info *info)
/* Get some pkeys so that we can change their bits in the AMR. */
pkey1 = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE);
if (pkey1 < 0) {
- pkey1 = sys_pkey_alloc(0, 0);
+ pkey1 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
FAIL_IF(pkey1 < 0);
disable_execute = false;
}
- pkey2 = sys_pkey_alloc(0, 0);
+ pkey2 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
FAIL_IF(pkey2 < 0);
- pkey3 = sys_pkey_alloc(0, 0);
+ pkey3 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
FAIL_IF(pkey3 < 0);
info->amr |= 3ul << pkeyshift(pkey1) | 2ul << pkeyshift(pkey2);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
index fc633014424f..10f63042cf91 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c
@@ -57,16 +57,16 @@ static int child(struct shared_info *info)
/* Get some pkeys so that we can change their bits in the AMR. */
pkey1 = sys_pkey_alloc(0, PKEY_DISABLE_EXECUTE);
if (pkey1 < 0) {
- pkey1 = sys_pkey_alloc(0, 0);
+ pkey1 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
CHILD_FAIL_IF(pkey1 < 0, &info->child_sync);
disable_execute = false;
}
- pkey2 = sys_pkey_alloc(0, 0);
+ pkey2 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
CHILD_FAIL_IF(pkey2 < 0, &info->child_sync);
- pkey3 = sys_pkey_alloc(0, 0);
+ pkey3 = sys_pkey_alloc(0, PKEY_UNRESTRICTED);
CHILD_FAIL_IF(pkey3 < 0, &info->child_sync);
info->amr1 |= 3ul << pkeyshift(pkey1);
diff --git a/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh b/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh
index 2e63ef009d59..2db12c5cad9c 100755
--- a/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh
+++ b/tools/testing/selftests/rcutorture/bin/srcu_lockdep.sh
@@ -49,7 +49,7 @@ do
do
err=
val=$((d*1000+t*10+c))
- tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --bootargs "rcutorture.test_srcu_lockdep=$val" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1
+ tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --kconfig "CONFIG_FORCE_NEED_SRCU_NMI_SAFE=y" --bootargs "rcutorture.test_srcu_lockdep=$val rcutorture.reader_flavor=0x2" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1
ret=$?
mv "$T/kvm.sh.out" "$RCUTORTURE/res/$ds/$val"
if test "$d" -ne 0 && test "$ret" -eq 0
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
index 2db39f298d18..fb61703690cb 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
@@ -2,3 +2,4 @@ rcutorture.torture_type=srcud
rcupdate.rcu_self_test=1
rcutorture.fwd_progress=3
srcutree.big_cpu_lim=5
+rcutorture.reader_flavor=0x8
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
index c419cac233ee..54f5c9053474 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot
@@ -2,3 +2,9 @@ rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3
rcupdate.rcu_self_test=1
+
+# This part is for synchronize_rcu() testing
+rcutorture.nfakewriters=-1
+rcutorture.gp_sync=1
+rcupdate.rcu_normal=1
+rcutree.rcu_normal_wake_from_gp=1
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index d30922d8c883..352393bc5c56 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -1,7 +1,8 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=16
-CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT_LAZY=y
CONFIG_PREEMPT=n
CONFIG_PREEMPT_DYNAMIC=n
#CHECK#CONFIG_TREE_RCU=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE10 b/tools/testing/selftests/rcutorture/configs/rcu/TREE10
index 759ee51d3ddc..420632b030dc 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE10
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE10
@@ -1,6 +1,7 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=74
-CONFIG_PREEMPT_NONE=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_LAZY=y
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=n
CONFIG_PREEMPT_DYNAMIC=n
diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore
index 16496de5f6ce..0fda241fa62b 100644
--- a/tools/testing/selftests/rseq/.gitignore
+++ b/tools/testing/selftests/rseq/.gitignore
@@ -9,3 +9,4 @@ param_test_compare_twice
param_test_mm_cid
param_test_mm_cid_benchmark
param_test_mm_cid_compare_twice
+syscall_errors_test
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 5a3432fceb58..0d0a5fae5954 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -16,11 +16,12 @@ OVERRIDE_TARGETS = 1
TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test param_test \
param_test_benchmark param_test_compare_twice param_test_mm_cid \
- param_test_mm_cid_benchmark param_test_mm_cid_compare_twice
+ param_test_mm_cid_benchmark param_test_mm_cid_compare_twice \
+ syscall_errors_test
TEST_GEN_PROGS_EXTENDED = librseq.so
-TEST_PROGS = run_param_test.sh
+TEST_PROGS = run_param_test.sh run_syscall_errors_test.sh
TEST_FILES := settings
@@ -54,3 +55,7 @@ $(OUTPUT)/param_test_mm_cid_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
$(OUTPUT)/param_test_mm_cid_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
rseq.h rseq-*.h
$(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@
+
+$(OUTPUT)/syscall_errors_test: syscall_errors_test.c $(TEST_GEN_PROGS_EXTENDED) \
+ rseq.h rseq-*.h
+ $(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index f6156790c3b4..663a9cef1952 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -71,9 +71,20 @@ static int rseq_ownership;
/* Original struct rseq allocation size is 32 bytes. */
#define ORIG_RSEQ_ALLOC_SIZE 32
+/*
+ * Use a union to ensure we allocate a TLS area of 1024 bytes to accommodate an
+ * rseq registration that is larger than the current rseq ABI.
+ */
+union rseq_tls {
+ struct rseq_abi abi;
+ char dummy[RSEQ_THREAD_AREA_ALLOC_SIZE];
+};
+
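+/*
+ * All union members share the same address, so &__rseq.abi is also the
+ * base of the reserved allocation.
+ */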
static
-__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"), aligned(RSEQ_THREAD_AREA_ALLOC_SIZE))) = {
- .cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED,
+__thread union rseq_tls __rseq __attribute__((tls_model("initial-exec"))) = {
+ .abi = {
+ .cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED,
+ },
};
static int sys_rseq(struct rseq_abi *rseq_abi, uint32_t rseq_len,
@@ -87,7 +98,7 @@ static int sys_getcpu(unsigned *cpu, unsigned *node)
return syscall(__NR_getcpu, cpu, node, NULL);
}
-int rseq_available(void)
+bool rseq_available(void)
{
int rc;
@@ -96,9 +107,9 @@ int rseq_available(void)
abort();
switch (errno) {
case ENOSYS:
- return 0;
+ return false;
case EINVAL:
- return 1;
+ return true;
default:
abort();
}
@@ -149,7 +160,7 @@ int rseq_register_current_thread(void)
/* Treat libc's ownership as a successful registration. */
return 0;
}
- rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
+ rc = sys_rseq(&__rseq.abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG);
if (rc) {
/*
* After at least one thread has registered successfully
@@ -183,7 +194,7 @@ int rseq_unregister_current_thread(void)
/* Treat libc's ownership as a successful unregistration. */
return 0;
}
- rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ rc = sys_rseq(&__rseq.abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
if (rc)
return -1;
return 0;
@@ -249,7 +260,7 @@ void rseq_init(void)
rseq_ownership = 1;
/* Calculate the offset of the rseq area from the thread pointer. */
- rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
+ rseq_offset = (void *)&__rseq.abi - rseq_thread_pointer();
/* rseq flags are deprecated, always set to 0. */
rseq_flags = 0;
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index ba424ce80a71..f51a5fdb0444 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -160,6 +160,11 @@ int32_t rseq_fallback_current_cpu(void);
int32_t rseq_fallback_current_node(void);
/*
+ * Returns true if rseq is supported.
+ */
+bool rseq_available(void);
+
+/*
* Values returned can be either the current CPU number, -1 (rseq is
* uninitialized), or -2 (rseq initialization has failed).
*/
diff --git a/tools/testing/selftests/rseq/run_syscall_errors_test.sh b/tools/testing/selftests/rseq/run_syscall_errors_test.sh
new file mode 100755
index 000000000000..9272246b39f2
--- /dev/null
+++ b/tools/testing/selftests/rseq/run_syscall_errors_test.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2024 Michael Jeanson <mjeanson@efficios.com>
+
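+# Disable glibc's own rseq registration so the test can exercise the syscall itself.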
+GLIBC_TUNABLES="${GLIBC_TUNABLES:-}:glibc.pthread.rseq=0" ./syscall_errors_test
diff --git a/tools/testing/selftests/rseq/syscall_errors_test.c b/tools/testing/selftests/rseq/syscall_errors_test.c
new file mode 100644
index 000000000000..a5d9e1f8a2dc
--- /dev/null
+++ b/tools/testing/selftests/rseq/syscall_errors_test.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: 2024 Michael Jeanson <mjeanson@efficios.com>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <syscall.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "rseq.h"
+
+static int sys_rseq(void *rseq_abi, uint32_t rseq_len,
+ int flags, uint32_t sig)
+{
+ return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig);
+}
+
+/*
+ * Check the value of errno on some expected failures of the rseq syscall.
+ */
+
+int main(void)
+{
+ struct rseq_abi *global_rseq = rseq_get_abi();
+ int ret;
+ int errno_copy;
+
+ if (!rseq_available()) {
+ fprintf(stderr, "rseq syscall unavailable");
+ goto error;
+ }
+
+ /* The current thread is NOT registered. */
+
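+ /* Stash errno right after each call: fprintf() below may clobber it. */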
+ /* EINVAL */
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, -1, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Registration with invalid flag fails with errno set to EINVAL (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EINVAL)
+ goto error;
+
+ errno = 0;
+ ret = sys_rseq((char *) global_rseq + 1, 32, 0, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Registration with unaligned rseq_abi fails with errno set to EINVAL (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EINVAL)
+ goto error;
+
+ errno = 0;
+ ret = sys_rseq(global_rseq, 31, 0, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Registration with invalid size fails with errno set to EINVAL (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EINVAL)
+ goto error;
+
+#if defined(__LP64__) && (!defined(__s390__) && !defined(__s390x__))
+ /*
+ * We haven't found a reliable way to find an invalid address when
+ * running a 32bit userspace on a 64bit kernel, so only run this test
+ * on 64bit builds for the moment.
+ *
+ * Also exclude architectures that select
+ * CONFIG_ALTERNATE_USER_ADDRESS_SPACE where the kernel and userspace
+ * have their own address space and this failure can't happen.
+ */
+
+ /* EFAULT */
+ errno = 0;
+ ret = sys_rseq((void *) -4096UL, 32, 0, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Registration with invalid address fails with errno set to EFAULT (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EFAULT)
+ goto error;
+#endif
+
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, 0, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Registration succeeds for the current thread (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret != 0 && errno != 0)
+ goto error;
+
+ /* The current thread is registered. */
+
+ /* EBUSY */
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, 0, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Double registration fails with errno set to EBUSY (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EBUSY)
+ goto error;
+
+ /* EPERM */
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG + 1);
+ errno_copy = errno;
+ fprintf(stderr, "Unregistration with wrong RSEQ_SIG fails with errno to EPERM (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EPERM)
+ goto error;
+
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Unregistration succeeds for the current thread (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret != 0)
+ goto error;
+
+ errno = 0;
+ ret = sys_rseq(global_rseq, 32, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ errno_copy = errno;
+ fprintf(stderr, "Double unregistration fails with errno set to EINVAL (ret = %d, errno = %s)\n", ret, strerrorname_np(errno_copy));
+ if (ret == 0 || errno_copy != EINVAL)
+ goto error;
+
+ return 0;
+error:
+ return -1;
+}
diff --git a/tools/testing/selftests/sched/config b/tools/testing/selftests/sched/config
index e8b09aa7c0c4..1bb8bf6d7fd4 100644
--- a/tools/testing/selftests/sched/config
+++ b/tools/testing/selftests/sched/config
@@ -1 +1 @@
-CONFIG_SCHED_DEBUG=y
+# empty
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 011762224600..f4531327b8e7 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -172,6 +172,7 @@ auto-test-targets := \
maximal \
maybe_null \
minimal \
+ numa \
prog_run \
reload_loop \
select_cpu_dfl \
diff --git a/tools/testing/selftests/sched_ext/config b/tools/testing/selftests/sched_ext/config
index 0de9b4ee249d..aa901b05c8ad 100644
--- a/tools/testing/selftests/sched_ext/config
+++ b/tools/testing/selftests/sched_ext/config
@@ -1,4 +1,3 @@
-CONFIG_SCHED_DEBUG=y
CONFIG_SCHED_CLASS_EXT=y
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
diff --git a/tools/testing/selftests/sched_ext/numa.bpf.c b/tools/testing/selftests/sched_ext/numa.bpf.c
new file mode 100644
index 000000000000..a79d86ed54a1
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/numa.bpf.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates the behavior of the NUMA-aware
+ * functionalities.
+ *
+ * The scheduler creates a separate DSQ for each NUMA node, ensuring tasks
+ * are exclusively processed by CPUs within their respective nodes. Idle
+ * CPUs are selected only within the same node, so task migration can only
+ * occur between CPUs belonging to the same node.
+ *
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+const volatile unsigned int __COMPAT_SCX_PICK_IDLE_IN_NODE;
+
+static bool is_cpu_idle(s32 cpu, int node)
+{
+ const struct cpumask *idle_cpumask;
+ bool idle;
+
+ idle_cpumask = __COMPAT_scx_bpf_get_idle_cpumask_node(node);
+ idle = bpf_cpumask_test_cpu(cpu, idle_cpumask);
+ scx_bpf_put_cpumask(idle_cpumask);
+
+ return idle;
+}
+
+s32 BPF_STRUCT_OPS(numa_select_cpu,
+ struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
+ s32 cpu;
+
+ /*
+ * We could just use __COMPAT_scx_bpf_pick_any_cpu_node() here,
+ * since it already tries to pick an idle CPU within the node
+ * first, but let's use both functions for better testing coverage.
+ */
+ cpu = __COMPAT_scx_bpf_pick_idle_cpu_node(p->cpus_ptr, node,
+ __COMPAT_SCX_PICK_IDLE_IN_NODE);
+ if (cpu < 0)
+ cpu = __COMPAT_scx_bpf_pick_any_cpu_node(p->cpus_ptr, node,
+ __COMPAT_SCX_PICK_IDLE_IN_NODE);
+
+ if (is_cpu_idle(cpu, node))
+ scx_bpf_error("CPU %d should be marked as busy", cpu);
+
+ if (__COMPAT_scx_bpf_cpu_node(cpu) != node)
+ scx_bpf_error("CPU %d should be in node %d", cpu, node);
+
+ return cpu;
+}
+
+void BPF_STRUCT_OPS(numa_enqueue, struct task_struct *p, u64 enq_flags)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(scx_bpf_task_cpu(p));
+
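+ /* Keep the task local: queue it on its node's DSQ. */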
+ scx_bpf_dsq_insert(p, node, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(numa_dispatch, s32 cpu, struct task_struct *prev)
+{
+ int node = __COMPAT_scx_bpf_cpu_node(cpu);
+
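+ /* Consume tasks only from this CPU's node DSQ. */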
+ scx_bpf_dsq_move_to_local(node);
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(numa_init)
+{
+ int node, err;
+
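+ /* Create one DSQ per NUMA node, using the node ID as the DSQ ID. */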
+ bpf_for(node, 0, __COMPAT_scx_bpf_nr_node_ids()) {
+ err = scx_bpf_create_dsq(node, node);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void BPF_STRUCT_OPS(numa_exit, struct scx_exit_info *ei)
+{
+ UEI_RECORD(uei, ei);
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops numa_ops = {
+ .select_cpu = (void *)numa_select_cpu,
+ .enqueue = (void *)numa_enqueue,
+ .dispatch = (void *)numa_dispatch,
+ .init = (void *)numa_init,
+ .exit = (void *)numa_exit,
+ .name = "numa",
+};
diff --git a/tools/testing/selftests/sched_ext/numa.c b/tools/testing/selftests/sched_ext/numa.c
new file mode 100644
index 000000000000..b060c3b65c82
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/numa.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "numa.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+ struct numa *skel;
+
+ skel = numa__open();
+ SCX_FAIL_IF(!skel, "Failed to open");
+ SCX_ENUM_INIT(skel);
+ skel->rodata->__COMPAT_SCX_PICK_IDLE_IN_NODE = SCX_PICK_IDLE_IN_NODE;
+ skel->struct_ops.numa_ops->flags = SCX_OPS_BUILTIN_IDLE_PER_NODE;
+ SCX_FAIL_IF(numa__load(skel), "Failed to load skel");
+
+ *ctx = skel;
+
+ return SCX_TEST_PASS;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+ struct numa *skel = ctx;
+ struct bpf_link *link;
+
+ link = bpf_map__attach_struct_ops(skel->maps.numa_ops);
+ SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+ /* Just sleeping is fine, plenty of scheduling events happening */
+ sleep(1);
+
+ SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+ bpf_link__destroy(link);
+
+ return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+ struct numa *skel = ctx;
+
+ numa__destroy(skel);
+}
+
+struct scx_test numa = {
+ .name = "numa",
+ .description = "Verify NUMA-aware functionalities",
+ .setup = setup,
+ .run = run,
+ .cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&numa)
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 14ba51b52095..b2f76a52215a 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -155,6 +155,12 @@ struct seccomp_data {
# endif
#endif
+#ifndef __NR_uretprobe
+# if defined(__x86_64__)
+# define __NR_uretprobe 335
+# endif
+#endif
+
#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 9814b3a1c77d..f0eceb0faf34 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -7,6 +7,7 @@
* Kernel loop code stolen from Steven Rostedt <srostedt@redhat.com>
*/
#define _GNU_SOURCE
+#include <sys/prctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <stdio.h>
@@ -599,14 +600,84 @@ static void check_overrun(int which, const char *name)
"check_overrun %s\n", name);
}
+#include <sys/syscall.h>
+
+static int do_timer_create(int *id)
+{
+ return syscall(__NR_timer_create, CLOCK_MONOTONIC, NULL, id);
+}
+
+static int do_timer_delete(int id)
+{
+ return syscall(__NR_timer_delete, id);
+}
+
+#ifndef PR_TIMER_CREATE_RESTORE_IDS
+# define PR_TIMER_CREATE_RESTORE_IDS 77
+# define PR_TIMER_CREATE_RESTORE_IDS_OFF 0
+# define PR_TIMER_CREATE_RESTORE_IDS_ON 1
+# define PR_TIMER_CREATE_RESTORE_IDS_GET 2
+#endif
+
+static void check_timer_create_exact(void)
+{
+ int id;
+
+ if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_ON, 0, 0, 0)) {
+ switch (errno) {
+ case EINVAL:
+ ksft_test_result_skip("check timer create exact, not supported\n");
+ return;
+ default:
+ ksft_test_result_skip("check timer create exact, errno = %d\n", errno);
+ return;
+ }
+ }
+
+ if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_GET, 0, 0, 0) != 1)
+ fatal_error(NULL, "prctl(GET) failed\n");
+
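+ /* In restore mode, timer_create() creates the timer with the ID requested in *id. */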
+ id = 8;
+ if (do_timer_create(&id) < 0)
+ fatal_error(NULL, "timer_create()");
+
+ if (do_timer_delete(id))
+ fatal_error(NULL, "timer_delete()");
+
+ if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_OFF, 0, 0, 0))
+ fatal_error(NULL, "prctl(OFF)");
+
+ if (prctl(PR_TIMER_CREATE_RESTORE_IDS, PR_TIMER_CREATE_RESTORE_IDS_GET, 0, 0, 0) != 0)
+ fatal_error(NULL, "prctl(GET) failed\n");
+
+ if (id != 8) {
+ ksft_test_result_fail("check timer create exact %d != 8\n", id);
+ return;
+ }
+
+ /* Validate that it went back to normal mode and allocates ID 9 */
+ if (do_timer_create(&id) < 0)
+ fatal_error(NULL, "timer_create()");
+
+ if (do_timer_delete(id))
+ fatal_error(NULL, "timer_delete()");
+
+ if (id == 9)
+ ksft_test_result_pass("check timer create exact\n");
+ else
+ ksft_test_result_fail("check timer create exact. Disabling failed.\n");
+}
+
int main(int argc, char **argv)
{
ksft_print_header();
- ksft_set_plan(18);
+ ksft_set_plan(19);
ksft_print_msg("Testing posix timers. False negative may happen on CPU execution \n");
ksft_print_msg("based timers if other threads run on the CPU...\n");
+ check_timer_create_exact();
+
check_itimer(ITIMER_VIRTUAL, "ITIMER_VIRTUAL");
check_itimer(ITIMER_PROF, "ITIMER_PROF");
check_itimer(ITIMER_REAL, "ITIMER_REAL");
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
index 83450145fe65..46c391d7f45d 100644
--- a/tools/testing/selftests/timers/skew_consistency.c
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -47,7 +47,7 @@ int main(int argc, char **argv)
pid = fork();
if (!pid)
- return system("./inconsistency-check -c 1 -t 600");
+ return system("./inconsistency-check -t 60");
ppm = 500;
ret = 0;
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
index 1cf14a8da438..12a0614b9fd4 100644
--- a/tools/testing/selftests/vDSO/Makefile
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -19,13 +19,20 @@ LDLIBS += -lgcc_s
endif
include ../lib.mk
+
+CFLAGS += $(TOOLS_INCLUDES)
+
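+# Freestanding build flags: no system libc or headers; use the in-tree nolibc instead.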
+CFLAGS_NOLIBC := -nostdlib -nostdinc -ffreestanding -fno-asynchronous-unwind-tables \
+ -fno-stack-protector -include $(top_srcdir)/tools/include/nolibc/nolibc.h \
+ -I$(top_srcdir)/tools/include/nolibc/ $(KHDR_INCLUDES)
+
$(OUTPUT)/vdso_test_gettimeofday: parse_vdso.c vdso_test_gettimeofday.c
$(OUTPUT)/vdso_test_getcpu: parse_vdso.c vdso_test_getcpu.c
$(OUTPUT)/vdso_test_abi: parse_vdso.c vdso_test_abi.c
$(OUTPUT)/vdso_test_clock_getres: vdso_test_clock_getres.c
-$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
-$(OUTPUT)/vdso_standalone_test_x86: CFLAGS +=-nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+$(OUTPUT)/vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c | headers
+$(OUTPUT)/vdso_standalone_test_x86: CFLAGS:=$(CFLAGS_NOLIBC) $(CFLAGS)
$(OUTPUT)/vdso_test_correctness: vdso_test_correctness.c
$(OUTPUT)/vdso_test_correctness: LDFLAGS += -ldl
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index f89d052c730e..3ff00fb624a4 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -19,13 +19,14 @@
#include <stdint.h>
#include <string.h>
#include <limits.h>
-#include <elf.h>
+#include <linux/auxvec.h>
+#include <linux/elf.h>
#include "parse_vdso.h"
/* And here's the code. */
#ifndef ELF_BITS
-# if ULONG_MAX > 0xffffffffUL
+# if __SIZEOF_LONG__ >= 8
# define ELF_BITS 64
# else
# define ELF_BITS 32
@@ -297,17 +298,3 @@ void *vdso_sym(const char *version, const char *name)
return 0;
}
-
-void vdso_init_from_auxv(void *auxv)
-{
- ELF(auxv_t) *elf_auxv = auxv;
- for (int i = 0; elf_auxv[i].a_type != AT_NULL; i++)
- {
- if (elf_auxv[i].a_type == AT_SYSINFO_EHDR) {
- vdso_init_from_sysinfo_ehdr(elf_auxv[i].a_un.a_val);
- return;
- }
- }
-
- vdso_info.valid = false;
-}
diff --git a/tools/testing/selftests/vDSO/parse_vdso.h b/tools/testing/selftests/vDSO/parse_vdso.h
index de0453067d7c..09d068ed11f9 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.h
+++ b/tools/testing/selftests/vDSO/parse_vdso.h
@@ -26,6 +26,5 @@
*/
void *vdso_sym(const char *version, const char *name);
void vdso_init_from_sysinfo_ehdr(uintptr_t base);
-void vdso_init_from_auxv(void *auxv);
#endif
diff --git a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 644915862af8..9ce795b806f0 100644
--- a/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
@@ -1,142 +1,58 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * vdso_test.c: Sample code to test parse_vdso.c on x86
- * Copyright (c) 2011-2014 Andy Lutomirski
+ * vdso_test_gettimeofday.c: Sample code to test parse_vdso.c and
+ * vDSO gettimeofday()
+ * Copyright (c) 2014 Andy Lutomirski
*
- * You can amuse yourself by compiling with:
- * gcc -std=gnu99 -nostdlib
- * -Os -fno-asynchronous-unwind-tables -flto -lgcc_s
- * vdso_standalone_test_x86.c parse_vdso.c
- * to generate a small binary. On x86_64, you can omit -lgcc_s
- * if you want the binary to be completely standalone.
+ * Compile with:
+ * gcc -std=gnu99 vdso_test_gettimeofday.c parse_vdso.c
+ *
+ * Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
*/
-#include <sys/syscall.h>
+#include <stdio.h>
+#ifndef NOLIBC
+#include <sys/auxv.h>
#include <sys/time.h>
-#include <unistd.h>
-#include <stdint.h>
-
-#include "parse_vdso.h"
-
-/* We need some libc functions... */
-int strcmp(const char *a, const char *b)
-{
- /* This implementation is buggy: it never returns -1. */
- while (*a || *b) {
- if (*a != *b)
- return 1;
- if (*a == 0 || *b == 0)
- return 1;
- a++;
- b++;
- }
-
- return 0;
-}
-
-/*
- * The clang build needs this, although gcc does not.
- * Stolen from lib/string.c.
- */
-void *memcpy(void *dest, const void *src, size_t count)
-{
- char *tmp = dest;
- const char *s = src;
-
- while (count--)
- *tmp++ = *s++;
- return dest;
-}
-
-/* ...and two syscalls. This is x86-specific. */
-static inline long x86_syscall3(long nr, long a0, long a1, long a2)
-{
- long ret;
-#ifdef __x86_64__
- asm volatile ("syscall" : "=a" (ret) : "a" (nr),
- "D" (a0), "S" (a1), "d" (a2) :
- "cc", "memory", "rcx",
- "r8", "r9", "r10", "r11" );
-#else
- asm volatile ("int $0x80" : "=a" (ret) : "a" (nr),
- "b" (a0), "c" (a1), "d" (a2) :
- "cc", "memory" );
#endif
- return ret;
-}
-static inline long linux_write(int fd, const void *data, size_t len)
-{
- return x86_syscall3(__NR_write, fd, (long)data, (long)len);
-}
+#include "../kselftest.h"
+#include "parse_vdso.h"
+#include "vdso_config.h"
+#include "vdso_call.h"
-static inline void linux_exit(int code)
+int main(int argc, char **argv)
{
- x86_syscall3(__NR_exit, code, 0, 0);
-}
+ const char *version = versions[VDSO_VERSION];
+ const char **name = (const char **)&names[VDSO_NAMES];
-void to_base10(char *lastdig, time_t n)
-{
- while (n) {
- *lastdig = (n % 10) + '0';
- n /= 10;
- lastdig--;
+ unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR);
+ if (!sysinfo_ehdr) {
+ printf("AT_SYSINFO_EHDR is not present!\n");
+ return KSFT_SKIP;
}
-}
-
-void c_main(void **stack)
-{
- /* Parse the stack */
- long argc = (long)*stack;
- stack += argc + 2;
-
- /* Now we're pointing at the environment. Skip it. */
- while(*stack)
- stack++;
- stack++;
- /* Now we're pointing at auxv. Initialize the vDSO parser. */
- vdso_init_from_auxv((void *)stack);
+ vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR));
/* Find gettimeofday. */
typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
- gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");
+ gtod_t gtod = (gtod_t)vdso_sym(version, name[0]);
- if (!gtod)
- linux_exit(1);
+ if (!gtod) {
+ printf("Could not find %s\n", name[0]);
+ return KSFT_SKIP;
+ }
struct timeval tv;
- long ret = gtod(&tv, 0);
+ long ret = VDSO_CALL(gtod, 2, &tv, 0);
if (ret == 0) {
- char buf[] = "The time is .000000\n";
- to_base10(buf + 31, tv.tv_sec);
- to_base10(buf + 38, tv.tv_usec);
- linux_write(1, buf, sizeof(buf) - 1);
+ printf("The time is %lld.%06lld\n",
+ (long long)tv.tv_sec, (long long)tv.tv_usec);
} else {
- linux_exit(ret);
+ printf("%s failed\n", name[0]);
+ return KSFT_FAIL;
}
- linux_exit(0);
+ return 0;
}
-
-/*
- * This is the real entry point. It passes the initial stack into
- * the C entry point.
- */
-asm (
- ".text\n"
- ".global _start\n"
- ".type _start,@function\n"
- "_start:\n\t"
-#ifdef __x86_64__
- "mov %rsp,%rdi\n\t"
- "and $-16,%rsp\n\t"
- "sub $8,%rsp\n\t"
- "jmp c_main"
-#else
- "push %esp\n\t"
- "call c_main\n\t"
- "int $3"
-#endif
- );
diff --git a/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
index e31b18ffae33..9ce795b806f0 100644
--- a/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
+++ b/tools/testing/selftests/vDSO/vdso_test_gettimeofday.c
@@ -10,11 +10,11 @@
* Tested on x86, 32-bit and 64-bit. It may work on other architectures, too.
*/
-#include <stdint.h>
-#include <elf.h>
#include <stdio.h>
+#ifndef NOLIBC
#include <sys/auxv.h>
#include <sys/time.h>
+#endif
#include "../kselftest.h"
#include "parse_vdso.h"
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index 139fd9aa8b12..c305d2f613f0 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -27,7 +27,6 @@ CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHED_DEBUG=y
CONFIG_SCHED_INFO=y
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index d51249f14e2f..28422c32cc8f 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -19,7 +19,7 @@ TARGETS_C_32BIT_ONLY := entry_from_vm86 test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip syscall_numbering \
- corrupt_xstate_header amx lam test_shadow_stack
+ corrupt_xstate_header amx lam test_shadow_stack avx
# Some selftests require 32bit support enabled also on 64bit systems
TARGETS_C_32BIT_NEEDED := ldt_gdt ptrace_syscall
@@ -132,3 +132,7 @@ $(OUTPUT)/check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static
$(OUTPUT)/nx_stack_32: CFLAGS += -Wl,-z,noexecstack
$(OUTPUT)/nx_stack_64: CFLAGS += -Wl,-z,noexecstack
+
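+# Build avx without AVX/AVX-512 codegen so compiler-emitted vector code cannot clobber the state under test.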
+$(OUTPUT)/avx_64: CFLAGS += -mno-avx -mno-avx512f
+$(OUTPUT)/amx_64: EXTRA_FILES += xstate.c
+$(OUTPUT)/avx_64: EXTRA_FILES += xstate.c
diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c
index 1fdf35a4d7f6..40769c16de1b 100644
--- a/tools/testing/selftests/x86/amx.c
+++ b/tools/testing/selftests/x86/amx.c
@@ -3,7 +3,6 @@
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
-#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
@@ -14,169 +13,27 @@
#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
-#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
-#include <sys/uio.h>
-#include "../kselftest.h" /* For __cpuid_count() */
+#include "helpers.h"
+#include "xstate.h"
#ifndef __x86_64__
# error This test is 64-bit only
#endif
-#define XSAVE_HDR_OFFSET 512
-#define XSAVE_HDR_SIZE 64
-
-struct xsave_buffer {
- union {
- struct {
- char legacy[XSAVE_HDR_OFFSET];
- char header[XSAVE_HDR_SIZE];
- char extended[0];
- };
- char bytes[0];
- };
-};
-
-static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
-{
- uint32_t rfbm_lo = rfbm;
- uint32_t rfbm_hi = rfbm >> 32;
-
- asm volatile("xsave (%%rdi)"
- : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
- : "memory");
-}
-
-static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
-{
- uint32_t rfbm_lo = rfbm;
- uint32_t rfbm_hi = rfbm >> 32;
-
- asm volatile("xrstor (%%rdi)"
- : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
-}
-
/* err() exits and will not return */
#define fatal_error(msg, ...) err(1, "[FAIL]\t" msg, ##__VA_ARGS__)
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- fatal_error("sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- fatal_error("sigaction");
-}
-
-#define XFEATURE_XTILECFG 17
-#define XFEATURE_XTILEDATA 18
#define XFEATURE_MASK_XTILECFG (1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA (1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE (XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)
-#define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26)
-#define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27)
-
-static uint32_t xbuf_size;
-
-static struct {
- uint32_t xbuf_offset;
- uint32_t size;
-} xtiledata;
-
-#define CPUID_LEAF_XSTATE 0xd
-#define CPUID_SUBLEAF_XSTATE_USER 0x0
-#define TILE_CPUID 0x1d
-#define TILE_PALETTE_ID 0x1
-
-static void check_cpuid_xtiledata(void)
-{
- uint32_t eax, ebx, ecx, edx;
-
- __cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
- eax, ebx, ecx, edx);
-
- /*
- * EBX enumerates the size (in bytes) required by the XSAVE
- * instruction for an XSAVE area containing all the user state
- * components corresponding to bits currently set in XCR0.
- *
- * Stash that off so it can be used to allocate buffers later.
- */
- xbuf_size = ebx;
-
- __cpuid_count(CPUID_LEAF_XSTATE, XFEATURE_XTILEDATA,
- eax, ebx, ecx, edx);
- /*
- * eax: XTILEDATA state component size
- * ebx: XTILEDATA state component offset in user buffer
- */
- if (!eax || !ebx)
- fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
- eax, ebx);
-
- xtiledata.size = eax;
- xtiledata.xbuf_offset = ebx;
-}
+struct xstate_info xtiledata;
/* The helpers for managing XSAVE buffer and tile states: */
-struct xsave_buffer *alloc_xbuf(void)
-{
- struct xsave_buffer *xbuf;
-
- /* XSAVE buffer should be 64B-aligned. */
- xbuf = aligned_alloc(64, xbuf_size);
- if (!xbuf)
- fatal_error("aligned_alloc()");
- return xbuf;
-}
-
-static inline void clear_xstate_header(struct xsave_buffer *buffer)
-{
- memset(&buffer->header, 0, sizeof(buffer->header));
-}
-
-static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
-{
- /* XSTATE_BV is at the beginning of the header: */
- *(uint64_t *)(&buffer->header) = bv;
-}
-
-static void set_rand_tiledata(struct xsave_buffer *xbuf)
-{
- int *ptr = (int *)&xbuf->bytes[xtiledata.xbuf_offset];
- int data;
- int i;
-
- /*
- * Ensure that 'data' is never 0. This ensures that
- * the registers are never in their initial configuration
- * and thus never tracked as being in the init state.
- */
- data = rand() | 1;
-
- for (i = 0; i < xtiledata.size / sizeof(int); i++, ptr++)
- *ptr = data;
-}
-
struct xsave_buffer *stashed_xsave;
static void init_stashed_xsave(void)
@@ -192,21 +49,6 @@ static void free_stashed_xsave(void)
free(stashed_xsave);
}
-/* See 'struct _fpx_sw_bytes' at sigcontext.h */
-#define SW_BYTES_OFFSET 464
-/* N.B. The struct's field name varies so read from the offset. */
-#define SW_BYTES_BV_OFFSET (SW_BYTES_OFFSET + 8)
-
-static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *buffer)
-{
- return (struct _fpx_sw_bytes *)(buffer + SW_BYTES_OFFSET);
-}
-
-static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
-{
- return *(uint64_t *)(buffer + SW_BYTES_BV_OFFSET);
-}
-
/* Work around printf() being unsafe in signals: */
#define SIGNAL_BUF_LEN 1000
char signal_message_buffer[SIGNAL_BUF_LEN];
@@ -304,17 +146,10 @@ static inline bool load_rand_tiledata(struct xsave_buffer *xbuf)
{
clear_xstate_header(xbuf);
set_xstatebv(xbuf, XFEATURE_MASK_XTILEDATA);
- set_rand_tiledata(xbuf);
+ set_rand_data(&xtiledata, xbuf);
return xrstor_safe(xbuf, XFEATURE_MASK_XTILEDATA);
}
-/* Return XTILEDATA to its initial configuration. */
-static inline void init_xtiledata(void)
-{
- clear_xstate_header(stashed_xsave);
- xrstor_safe(stashed_xsave, XFEATURE_MASK_XTILEDATA);
-}
-
enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };
/* arch_prctl() and sigaltstack() test */
@@ -587,14 +422,6 @@ static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
return true;
}
-static inline void validate_tiledata_regs_same(struct xsave_buffer *xbuf)
-{
- int ret = __validate_tiledata_regs(xbuf);
-
- if (ret != 0)
- fatal_error("TILEDATA registers changed");
-}
-
static inline void validate_tiledata_regs_changed(struct xsave_buffer *xbuf)
{
int ret = __validate_tiledata_regs(xbuf);
@@ -651,251 +478,6 @@ static void test_fork(void)
_exit(0);
}
-/* Context switching test */
-
-static struct _ctxtswtest_cfg {
- unsigned int iterations;
- unsigned int num_threads;
-} ctxtswtest_config;
-
-struct futex_info {
- pthread_t thread;
- int nr;
- pthread_mutex_t mutex;
- struct futex_info *next;
-};
-
-static void *check_tiledata(void *info)
-{
- struct futex_info *finfo = (struct futex_info *)info;
- struct xsave_buffer *xbuf;
- int i;
-
- xbuf = alloc_xbuf();
- if (!xbuf)
- fatal_error("unable to allocate XSAVE buffer");
-
- /*
- * Load random data into 'xbuf' and then restore
- * it to the tile registers themselves.
- */
- load_rand_tiledata(xbuf);
- for (i = 0; i < ctxtswtest_config.iterations; i++) {
- pthread_mutex_lock(&finfo->mutex);
-
- /*
- * Ensure the register values have not
- * diverged from those recorded in 'xbuf'.
- */
- validate_tiledata_regs_same(xbuf);
-
- /* Load new, random values into xbuf and registers */
- load_rand_tiledata(xbuf);
-
- /*
- * The last thread's last unlock will be for
- * thread 0's mutex. However, thread 0 will
- * have already exited the loop and the mutex
- * will already be unlocked.
- *
- * Because this is not an ERRORCHECK mutex,
- * that inconsistency will be silently ignored.
- */
- pthread_mutex_unlock(&finfo->next->mutex);
- }
-
- free(xbuf);
- /*
- * Return this thread's finfo, which is
- * a unique value for this thread.
- */
- return finfo;
-}
-
-static int create_threads(int num, struct futex_info *finfo)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- int next_nr;
-
- finfo[i].nr = i;
- /*
- * Thread 'i' will wait on this mutex to
- * be unlocked. Lock it immediately after
- * initialization:
- */
- pthread_mutex_init(&finfo[i].mutex, NULL);
- pthread_mutex_lock(&finfo[i].mutex);
-
- next_nr = (i + 1) % num;
- finfo[i].next = &finfo[next_nr];
-
- if (pthread_create(&finfo[i].thread, NULL, check_tiledata, &finfo[i]))
- fatal_error("pthread_create()");
- }
- return 0;
-}
-
-static void affinitize_cpu0(void)
-{
- cpu_set_t cpuset;
-
- CPU_ZERO(&cpuset);
- CPU_SET(0, &cpuset);
-
- if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
- fatal_error("sched_setaffinity to CPU 0");
-}
-
-static void test_context_switch(void)
-{
- struct futex_info *finfo;
- int i;
-
- /* Affinitize to one CPU to force context switches */
- affinitize_cpu0();
-
- req_xtiledata_perm();
-
- printf("[RUN]\tCheck tiledata context switches, %d iterations, %d threads.\n",
- ctxtswtest_config.iterations,
- ctxtswtest_config.num_threads);
-
-
- finfo = malloc(sizeof(*finfo) * ctxtswtest_config.num_threads);
- if (!finfo)
- fatal_error("malloc()");
-
- create_threads(ctxtswtest_config.num_threads, finfo);
-
- /*
- * This thread wakes up thread 0
- * Thread 0 will wake up 1
- * Thread 1 will wake up 2
- * ...
- * the last thread will wake up 0
- *
- * ... this will repeat for the configured
- * number of iterations.
- */
- pthread_mutex_unlock(&finfo[0].mutex);
-
- /* Wait for all the threads to finish: */
- for (i = 0; i < ctxtswtest_config.num_threads; i++) {
- void *thread_retval;
- int rc;
-
- rc = pthread_join(finfo[i].thread, &thread_retval);
-
- if (rc)
- fatal_error("pthread_join() failed for thread %d err: %d\n",
- i, rc);
-
- if (thread_retval != &finfo[i])
- fatal_error("unexpected thread retval for thread %d: %p\n",
- i, thread_retval);
-
- }
-
- printf("[OK]\tNo incorrect case was found.\n");
-
- free(finfo);
-}
-
-/* Ptrace test */
-
-/*
- * Make sure the ptracee has the expanded kernel buffer on the first
- * use. Then, initialize the state before performing the state
- * injection from the ptracer.
- */
-static inline void ptracee_firstuse_tiledata(void)
-{
- load_rand_tiledata(stashed_xsave);
- init_xtiledata();
-}
-
-/*
- * Ptracer injects the randomized tile data state. It also reads
- * before and after that, which will execute the kernel's state copy
- * functions. So, the tester is advised to double-check any emitted
- * kernel messages.
- */
-static void ptracer_inject_tiledata(pid_t target)
-{
- struct xsave_buffer *xbuf;
- struct iovec iov;
-
- xbuf = alloc_xbuf();
- if (!xbuf)
- fatal_error("unable to allocate XSAVE buffer");
-
- printf("\tRead the init'ed tiledata via ptrace().\n");
-
- iov.iov_base = xbuf;
- iov.iov_len = xbuf_size;
-
- memset(stashed_xsave, 0, xbuf_size);
-
- if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
- fatal_error("PTRACE_GETREGSET");
-
- if (!__compare_tiledata_state(stashed_xsave, xbuf))
- printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
- else
- printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");
-
- printf("\tInject tiledata via ptrace().\n");
-
- load_rand_tiledata(xbuf);
-
- memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
- &xbuf->bytes[xtiledata.xbuf_offset],
- xtiledata.size);
-
- if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
- fatal_error("PTRACE_SETREGSET");
-
- if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
- fatal_error("PTRACE_GETREGSET");
-
- if (!__compare_tiledata_state(stashed_xsave, xbuf))
- printf("[OK]\tTiledata was correctly written to ptracee.\n");
- else
- printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
-}
-
-static void test_ptrace(void)
-{
- pid_t child;
- int status;
-
- child = fork();
- if (child < 0) {
- err(1, "fork");
- } else if (!child) {
- if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
- err(1, "PTRACE_TRACEME");
-
- ptracee_firstuse_tiledata();
-
- raise(SIGTRAP);
- _exit(0);
- }
-
- do {
- wait(&status);
- } while (WSTOPSIG(status) != SIGTRAP);
-
- ptracer_inject_tiledata(child);
-
- ptrace(PTRACE_DETACH, child, NULL, NULL);
- wait(&status);
- if (!WIFEXITED(status) || WEXITSTATUS(status))
- err(1, "ptrace test");
-}
-
int main(void)
{
unsigned long features;
@@ -907,7 +489,11 @@ int main(void)
return KSFT_SKIP;
}
- check_cpuid_xtiledata();
+ xtiledata = get_xstate_info(XFEATURE_XTILEDATA);
+ if (!xtiledata.size || !xtiledata.xbuf_offset) {
+ fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
+ xtiledata.size, xtiledata.xbuf_offset);
+ }
init_stashed_xsave();
sethandler(SIGILL, handle_noperm, 0);
@@ -919,11 +505,11 @@ int main(void)
test_fork();
- ctxtswtest_config.iterations = 10;
- ctxtswtest_config.num_threads = 5;
- test_context_switch();
-
- test_ptrace();
+ /*
+ * Perform generic xstate tests for context switching, ptrace,
+ * and signal.
+ */
+ test_xstate(XFEATURE_XTILEDATA);
clearhandler(SIGILL);
free_stashed_xsave();
diff --git a/tools/testing/selftests/x86/avx.c b/tools/testing/selftests/x86/avx.c
new file mode 100644
index 000000000000..11d5367c235f
--- /dev/null
+++ b/tools/testing/selftests/x86/avx.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE /* Required for inline xstate helpers */
+#include "xstate.h"
+
+int main(void)
+{
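+ /* Run the generic xstate context-switch/ptrace/signal tests for each AVX component. */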
+ test_xstate(XFEATURE_YMM);
+ test_xstate(XFEATURE_OPMASK);
+ test_xstate(XFEATURE_ZMM_Hi256);
+ test_xstate(XFEATURE_Hi16_ZMM);
+}
diff --git a/tools/testing/selftests/x86/corrupt_xstate_header.c b/tools/testing/selftests/x86/corrupt_xstate_header.c
index cf9ce8fbb656..93a89a5997ca 100644
--- a/tools/testing/selftests/x86/corrupt_xstate_header.c
+++ b/tools/testing/selftests/x86/corrupt_xstate_header.c
@@ -18,6 +18,7 @@
#include <sys/wait.h>
#include "../kselftest.h" /* For __cpuid_count() */
+#include "helpers.h"
static inline int xsave_enabled(void)
{
@@ -29,19 +30,6 @@ static inline int xsave_enabled(void)
return ecx & (1U << 27);
}
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static void sigusr1(int sig, siginfo_t *info, void *uc_void)
{
ucontext_t *uc = uc_void;
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index d1e919b0c1dc..5cb8393737d0 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -24,31 +24,11 @@
#include <errno.h>
#include <sys/vm86.h>
+#include "helpers.h"
+
static unsigned long load_addr = 0x10000;
static int nerrs = 0;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static sig_atomic_t got_signal;
static void sighandler(int sig, siginfo_t *info, void *ctx_void)
diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
index 50cf32de6313..0a75252d31b6 100644
--- a/tools/testing/selftests/x86/fsgsbase.c
+++ b/tools/testing/selftests/x86/fsgsbase.c
@@ -28,6 +28,8 @@
#include <sys/wait.h>
#include <setjmp.h>
+#include "helpers.h"
+
#ifndef __x86_64__
# error This test is 64-bit only
#endif
@@ -39,28 +41,6 @@ static unsigned short *shared_scratch;
static int nerrs;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static void sigsegv(int sig, siginfo_t *si, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t*)ctx_void;
diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h
index 4ef42c4559a9..6deaad035161 100644
--- a/tools/testing/selftests/x86/helpers.h
+++ b/tools/testing/selftests/x86/helpers.h
@@ -2,8 +2,13 @@
#ifndef __SELFTESTS_X86_HELPERS_H
#define __SELFTESTS_X86_HELPERS_H
+#include <signal.h>
+#include <string.h>
+
#include <asm/processor-flags.h>
+#include "../kselftest.h"
+
static inline unsigned long get_eflags(void)
{
#ifdef __x86_64__
@@ -22,4 +27,27 @@ static inline void set_eflags(unsigned long eflags)
#endif
}
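+
+/* Signal-handler helpers shared by the x86 selftests: */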
+static inline void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), int flags)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = handler;
+ sa.sa_flags = SA_SIGINFO | flags;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+		ksft_exit_fail_msg("sigaction failed\n");
+}
+
+static inline void clearhandler(int sig)
+{
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(sig, &sa, 0))
+		ksft_exit_fail_msg("sigaction failed\n");
+}
+
#endif /* __SELFTESTS_X86_HELPERS_H */
diff --git a/tools/testing/selftests/x86/ioperm.c b/tools/testing/selftests/x86/ioperm.c
index 57ec5e99edb9..69d5fb7050c2 100644
--- a/tools/testing/selftests/x86/ioperm.c
+++ b/tools/testing/selftests/x86/ioperm.c
@@ -20,30 +20,9 @@
#include <sched.h>
#include <sys/io.h>
-static int nerrs = 0;
-
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-
-}
+#include "helpers.h"
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
+static int nerrs = 0;
static jmp_buf jmpbuf;
diff --git a/tools/testing/selftests/x86/iopl.c b/tools/testing/selftests/x86/iopl.c
index 7e3e09c1abac..457b6715542b 100644
--- a/tools/testing/selftests/x86/iopl.c
+++ b/tools/testing/selftests/x86/iopl.c
@@ -20,30 +20,9 @@
#include <sched.h>
#include <sys/io.h>
-static int nerrs = 0;
-
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-
-}
+#include "helpers.h"
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
+static int nerrs = 0;
static jmp_buf jmpbuf;
diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c
index 4d4a76532dc9..18d736640ece 100644
--- a/tools/testing/selftests/x86/lam.c
+++ b/tools/testing/selftests/x86/lam.c
@@ -4,6 +4,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
+#include <sys/ioctl.h>
#include <time.h>
#include <signal.h>
#include <setjmp.h>
@@ -43,7 +44,15 @@
#define FUNC_INHERITE 0x20
#define FUNC_PASID 0x40
+/* get_user() pointer test cases */
+#define GET_USER_USER 0
+#define GET_USER_KERNEL_TOP 1
+#define GET_USER_KERNEL_BOT 2
+#define GET_USER_KERNEL 3
+
#define TEST_MASK 0x7f
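+/*
+ * The top address bit plus the canonical sign-extension bits for
+ * 5-level (bits 56-63) and 4-level (bits 47-63) paging:
+ */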
+#define L5_SIGN_EXT_MASK (0xFFUL << 56)
+#define L4_SIGN_EXT_MASK (0x1FFFFUL << 47)
#define LOW_ADDR (0x1UL << 30)
#define HIGH_ADDR (0x3UL << 48)
@@ -115,23 +124,42 @@ static void segv_handler(int sig)
siglongjmp(segv_env, 1);
}
-static inline int cpu_has_lam(void)
+static inline int lam_is_available(void)
{
unsigned int cpuinfo[4];
+ unsigned long bits = 0;
+ int ret;
__cpuid_count(0x7, 1, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
- return (cpuinfo[0] & (1 << 26));
+ /* Check if cpu supports LAM */
+ if (!(cpuinfo[0] & (1 << 26))) {
+ ksft_print_msg("LAM is not supported!\n");
+ return 0;
+ }
+
+ /* Return 0 if CONFIG_ADDRESS_MASKING is not set */
+ ret = syscall(SYS_arch_prctl, ARCH_GET_MAX_TAG_BITS, &bits);
+ if (ret) {
+ ksft_print_msg("LAM is disabled in the kernel!\n");
+ return 0;
+ }
+
+ return 1;
}
-/* Check 5-level page table feature in CPUID.(EAX=07H, ECX=00H):ECX.[bit 16] */
-static inline int cpu_has_la57(void)
+static inline int la57_enabled(void)
{
- unsigned int cpuinfo[4];
+ int ret;
+ void *p;
+
+ p = mmap((void *)HIGH_ADDR, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
- __cpuid_count(0x7, 0, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
+ ret = p == MAP_FAILED ? 0 : 1;
- return (cpuinfo[2] & (1 << 16));
+	if (ret)
+		munmap(p, PAGE_SIZE);
+ return ret;
}
/*
@@ -322,7 +350,7 @@ static int handle_mmap(struct testcases *test)
flags, -1, 0);
if (ptr == MAP_FAILED) {
if (test->addr == HIGH_ADDR)
- if (!cpu_has_la57())
+ if (!la57_enabled())
return 3; /* LA57 unsupported */
return 1;
}
@@ -370,6 +398,78 @@ static int handle_syscall(struct testcases *test)
return ret;
}
+static int get_user_syscall(struct testcases *test)
+{
+ uint64_t ptr_address, bitmask;
+ int fd, ret = 0;
+ void *ptr;
+
+ if (la57_enabled()) {
+ bitmask = L5_SIGN_EXT_MASK;
+ ptr_address = HIGH_ADDR;
+ } else {
+ bitmask = L4_SIGN_EXT_MASK;
+ ptr_address = LOW_ADDR;
+ }
+
+ ptr = mmap((void *)ptr_address, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+
+ if (ptr == MAP_FAILED) {
+ perror("failed to map byte to pass into get_user");
+ return 1;
+ }
+
+ if (set_lam(test->lam) != 0) {
+ ret = 2;
+ goto error;
+ }
+
+ fd = memfd_create("lam_ioctl", 0);
+ if (fd == -1) {
+		perror("memfd_create failed");
+		munmap(ptr, PAGE_SIZE);
+ exit(EXIT_FAILURE);
+ }
+
+ switch (test->later) {
+ case GET_USER_USER:
+ /* Control group - properly tagged user pointer */
+ ptr = (void *)set_metadata((uint64_t)ptr, test->lam);
+ break;
+ case GET_USER_KERNEL_TOP:
+ /* Kernel address with top bit cleared */
+ bitmask &= (bitmask >> 1);
+ ptr = (void *)((uint64_t)ptr | bitmask);
+ break;
+ case GET_USER_KERNEL_BOT:
+ /* Kernel address with bottom sign-extension bit cleared */
+ bitmask &= (bitmask << 1);
+ ptr = (void *)((uint64_t)ptr | bitmask);
+ break;
+ case GET_USER_KERNEL:
+ /* Try to pass a kernel address */
+ ptr = (void *)((uint64_t)ptr | bitmask);
+ break;
+ default:
+ printf("Invalid test case value passed!\n");
+ break;
+ }
+
+ /*
+ * Use FIOASYNC ioctl because it utilizes get_user() internally and is
+ * very non-invasive to the system. Pass differently tagged pointers to
+ * get_user() in order to verify that valid user pointers are going
+ * through and invalid kernel/non-canonical pointers are not.
+ */
+ if (ioctl(fd, FIOASYNC, ptr) != 0)
+ ret = 1;
+
+ close(fd);
+error:
+ munmap(ptr, PAGE_SIZE);
+ return ret;
+}
+
int sys_uring_setup(unsigned int entries, struct io_uring_params *p)
{
return (int)syscall(__NR_io_uring_setup, entries, p);
@@ -596,8 +696,10 @@ int do_uring(unsigned long lam)
fi->file_fd = file_fd;
ring = malloc(sizeof(*ring));
- if (!ring)
+ if (!ring) {
+ free(fi);
return 1;
+ }
memset(ring, 0, sizeof(struct io_ring));
@@ -883,6 +985,33 @@ static struct testcases syscall_cases[] = {
.test_func = handle_syscall,
.msg = "SYSCALL:[Negative] Disable LAM. Dereferencing pointer with metadata.\n",
},
+ {
+ .later = GET_USER_USER,
+ .lam = LAM_U57_BITS,
+ .test_func = get_user_syscall,
+ .msg = "GET_USER: get_user() and pass a properly tagged user pointer.\n",
+ },
+ {
+ .later = GET_USER_KERNEL_TOP,
+ .expected = 1,
+ .lam = LAM_U57_BITS,
+ .test_func = get_user_syscall,
+ .msg = "GET_USER:[Negative] get_user() with a kernel pointer and the top bit cleared.\n",
+ },
+ {
+ .later = GET_USER_KERNEL_BOT,
+ .expected = 1,
+ .lam = LAM_U57_BITS,
+ .test_func = get_user_syscall,
+ .msg = "GET_USER:[Negative] get_user() with a kernel pointer and the bottom sign-extension bit cleared.\n",
+ },
+ {
+ .later = GET_USER_KERNEL,
+ .expected = 1,
+ .lam = LAM_U57_BITS,
+ .test_func = get_user_syscall,
+ .msg = "GET_USER:[Negative] get_user() and pass a kernel pointer.\n",
+ },
};
static struct testcases mmap_cases[] = {
@@ -1181,10 +1310,8 @@ int main(int argc, char **argv)
tests_cnt = 0;
- if (!cpu_has_lam()) {
- ksft_print_msg("Unsupported LAM feature!\n");
+ if (!lam_is_available())
return KSFT_SKIP;
- }
while ((c = getopt(argc, argv, "ht:")) != -1) {
switch (c) {
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 3a29346e1452..bb99a71380a5 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -26,6 +26,8 @@
#include <asm/prctl.h>
#include <sys/prctl.h>
+#include "helpers.h"
+
#define AR_ACCESSED (1<<8)
#define AR_TYPE_RODATA (0 * (1<<9))
@@ -506,20 +508,6 @@ static void fix_sa_restorer(int sig)
}
#endif
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-
- fix_sa_restorer(sig);
-}
-
static jmp_buf jmpbuf;
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
@@ -549,9 +537,11 @@ static void do_multicpu_tests(void)
}
sethandler(SIGSEGV, sigsegv, 0);
+ fix_sa_restorer(SIGSEGV);
#ifdef __i386__
/* True 32-bit kernels send SIGILL instead of SIGSEGV on IRET faults. */
sethandler(SIGILL, sigsegv, 0);
+ fix_sa_restorer(SIGILL);
#endif
printf("[RUN]\tCross-CPU LDT invalidation\n");
diff --git a/tools/testing/selftests/x86/mov_ss_trap.c b/tools/testing/selftests/x86/mov_ss_trap.c
index cc3de6ff9fba..f22cb6b382f9 100644
--- a/tools/testing/selftests/x86/mov_ss_trap.c
+++ b/tools/testing/selftests/x86/mov_ss_trap.c
@@ -36,7 +36,7 @@
#include <setjmp.h>
#include <sys/prctl.h>
-#define X86_EFLAGS_RF (1UL << 16)
+#include "helpers.h"
#if __x86_64__
# define REG_IP REG_RIP
@@ -94,18 +94,6 @@ static void enable_watchpoint(void)
}
}
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static char const * const signames[] = {
[SIGSEGV] = "SIGSEGV",
[SIGBUS] = "SIBGUS",
diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
index 12aaa063196e..360ec88d5432 100644
--- a/tools/testing/selftests/x86/ptrace_syscall.c
+++ b/tools/testing/selftests/x86/ptrace_syscall.c
@@ -15,6 +15,8 @@
#include <asm/ptrace-abi.h>
#include <sys/auxv.h>
+#include "helpers.h"
+
/* Bitness-agnostic defines for user_regs_struct fields. */
#ifdef __x86_64__
# define user_syscall_nr orig_rax
@@ -93,18 +95,6 @@ static siginfo_t wait_trap(pid_t chld)
return si;
}
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static void setsigign(int sig, int flags)
{
struct sigaction sa;
@@ -116,16 +106,6 @@ static void setsigign(int sig, int flags)
err(1, "sigaction");
}
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
#ifdef __x86_64__
# define REG_BP REG_RBP
#else
diff --git a/tools/testing/selftests/x86/sigaltstack.c b/tools/testing/selftests/x86/sigaltstack.c
index f689af75e979..0ae1b784498c 100644
--- a/tools/testing/selftests/x86/sigaltstack.c
+++ b/tools/testing/selftests/x86/sigaltstack.c
@@ -14,6 +14,8 @@
#include <sys/resource.h>
#include <setjmp.h>
+#include "helpers.h"
+
/* sigaltstack()-enforced minimum stack */
#define ENFORCED_MINSIGSTKSZ 2048
@@ -27,30 +29,6 @@ static bool sigalrm_expected;
static unsigned long at_minstack_size;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static int setup_altstack(void *start, unsigned long size)
{
stack_t ss;
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 0b75b29f794b..26ef562f4232 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -46,6 +46,8 @@
#include <sys/ptrace.h>
#include <sys/user.h>
+#include "helpers.h"
+
/* Pull in AR_xyz defines. */
typedef unsigned int u32;
typedef unsigned short u16;
@@ -138,28 +140,6 @@ static unsigned short LDT3(int idx)
return (idx << 3) | 7;
}
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static void add_ldt(const struct user_desc *desc, unsigned short *var,
const char *name)
{
diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
index 9a30f443e928..280d7a22b9c9 100644
--- a/tools/testing/selftests/x86/single_step_syscall.c
+++ b/tools/testing/selftests/x86/single_step_syscall.c
@@ -33,28 +33,6 @@
#include "helpers.h"
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static volatile sig_atomic_t sig_traps, sig_eflags;
sigjmp_buf jmpbuf;
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
index 48ab065a76f9..f67a2df335ba 100644
--- a/tools/testing/selftests/x86/syscall_arg_fault.c
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -17,18 +17,6 @@
#include "helpers.h"
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t n_errs;
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
index a108b80dd082..f9c9814160f0 100644
--- a/tools/testing/selftests/x86/syscall_nt.c
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -18,18 +18,6 @@
static unsigned int nerrs;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static void sigtrap(int sig, siginfo_t *si, void *ctx_void)
{
}
diff --git a/tools/testing/selftests/x86/syscall_numbering.c b/tools/testing/selftests/x86/syscall_numbering.c
index 991591718bb0..41c42b7b54a6 100644
--- a/tools/testing/selftests/x86/syscall_numbering.c
+++ b/tools/testing/selftests/x86/syscall_numbering.c
@@ -25,6 +25,7 @@
#include <sys/mman.h>
#include <linux/ptrace.h>
+#include "../kselftest.h"
/* Common system call numbers */
#define SYS_READ 0
@@ -313,7 +314,7 @@ static void test_syscall_numbering(void)
* The MSB is supposed to be ignored, so we loop over a few
* to test that out.
*/
- for (size_t i = 0; i < sizeof(msbs)/sizeof(msbs[0]); i++) {
+ for (size_t i = 0; i < ARRAY_SIZE(msbs); i++) {
int msb = msbs[i];
run("Checking system calls with msb = %d (0x%x)\n",
msb, msb);
diff --git a/tools/testing/selftests/x86/sysret_rip.c b/tools/testing/selftests/x86/sysret_rip.c
index b30de9aaa6d4..5fb531e3ad7c 100644
--- a/tools/testing/selftests/x86/sysret_rip.c
+++ b/tools/testing/selftests/x86/sysret_rip.c
@@ -22,6 +22,8 @@
#include <sys/mman.h>
#include <assert.h>
+#include "helpers.h"
+
/*
* These items are in clang_helpers_64.S, in order to avoid clang inline asm
* limitations:
@@ -31,28 +33,6 @@ extern const char test_page[];
static void const *current_test_page_addr = test_page;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
-static void clearhandler(int sig)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_handler = SIG_DFL;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
/* State used by our signal handlers. */
static gregset_t initial_regs;
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 6de11b4df458..05e1e6774fba 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -310,19 +310,6 @@ static void test_getcpu(int cpu)
static jmp_buf jmpbuf;
static volatile unsigned long segv_err;
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- ksft_exit_fail_msg("sigaction failed\n");
-}
-
static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
ucontext_t *ctx = (ucontext_t *)ctx_void;
diff --git a/tools/testing/selftests/x86/unwind_vdso.c b/tools/testing/selftests/x86/unwind_vdso.c
index 4c311e1af4c7..9cc17588d818 100644
--- a/tools/testing/selftests/x86/unwind_vdso.c
+++ b/tools/testing/selftests/x86/unwind_vdso.c
@@ -43,18 +43,6 @@ int main()
#include <dlfcn.h>
#include <unwind.h>
-static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
- int flags)
-{
- struct sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sa.sa_sigaction = handler;
- sa.sa_flags = SA_SIGINFO | flags;
- sigemptyset(&sa.sa_mask);
- if (sigaction(sig, &sa, 0))
- err(1, "sigaction");
-}
-
static volatile sig_atomic_t nerrs;
static unsigned long sysinfo;
static bool got_sysinfo = false;
diff --git a/tools/testing/selftests/x86/xstate.c b/tools/testing/selftests/x86/xstate.c
new file mode 100644
index 000000000000..23c1d6c964ea
--- /dev/null
+++ b/tools/testing/selftests/x86/xstate.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+
+#include <elf.h>
+#include <pthread.h>
+#include <stdbool.h>
+
+#include <asm/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+
+#include "helpers.h"
+#include "xstate.h"
+
+/*
+ * The userspace xstate test suite is designed to be generic and operates
+ * with randomized xstate data. However, some states require special handling:
+ *
+ * - PKRU and XTILECFG need specific adjustments, such as modifying
+ * randomization behavior or using fixed values.
+ * - PKRU, however, already has a dedicated test suite in tools/testing/selftests/mm.
+ * - Legacy states (FP and SSE) are excluded, as they are not considered
+ * part of extended states (xstates) and their usage is already deeply
+ * integrated into user-space libraries.
+ */
+#define XFEATURE_MASK_TEST_SUPPORTED \
+ ((1 << XFEATURE_YMM) | \
+ (1 << XFEATURE_OPMASK) | \
+ (1 << XFEATURE_ZMM_Hi256) | \
+ (1 << XFEATURE_Hi16_ZMM) | \
+ (1 << XFEATURE_XTILEDATA))
+
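+/*
+ * XGETBV reads the extended control register selected by ECX; XCR0
+ * (index 0) enumerates the currently enabled user xfeatures.
+ */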
+static inline uint64_t xgetbv(uint32_t index)
+{
+ uint32_t eax, edx;
+
+ asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index));
+ return eax + ((uint64_t)edx << 32);
+}
+
+static inline uint64_t get_xstatebv(struct xsave_buffer *xbuf)
+{
+ return *(uint64_t *)(&xbuf->header);
+}
+
+static struct xstate_info xstate;
+
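+/*
+ * Despite the name, the test threads synchronize through a ring of
+ * pthread mutexes rather than raw futexes.
+ */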
+struct futex_info {
+ unsigned int iterations;
+ struct futex_info *next;
+ pthread_mutex_t mutex;
+ pthread_t thread;
+ bool valid;
+ int nr;
+};
+
+static inline void load_rand_xstate(struct xstate_info *xstate, struct xsave_buffer *xbuf)
+{
+ clear_xstate_header(xbuf);
+ set_xstatebv(xbuf, xstate->mask);
+ set_rand_data(xstate, xbuf);
+ xrstor(xbuf, xstate->mask);
+}
+
+static inline void load_init_xstate(struct xstate_info *xstate, struct xsave_buffer *xbuf)
+{
+ clear_xstate_header(xbuf);
+ xrstor(xbuf, xstate->mask);
+}
+
+static inline void copy_xstate(struct xsave_buffer *xbuf_dst, struct xsave_buffer *xbuf_src)
+{
+ memcpy(&xbuf_dst->bytes[xstate.xbuf_offset],
+ &xbuf_src->bytes[xstate.xbuf_offset],
+ xstate.size);
+}
+
+static inline bool validate_xstate_same(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
+{
+ int ret;
+
+ ret = memcmp(&xbuf1->bytes[xstate.xbuf_offset],
+ &xbuf2->bytes[xstate.xbuf_offset],
+ xstate.size);
+ return ret == 0;
+}
+
+static inline bool validate_xregs_same(struct xsave_buffer *xbuf1)
+{
+ struct xsave_buffer *xbuf2;
+ bool ret;
+
+ xbuf2 = alloc_xbuf();
+ if (!xbuf2)
+ ksft_exit_fail_msg("failed to allocate XSAVE buffer\n");
+
+ xsave(xbuf2, xstate.mask);
+ ret = validate_xstate_same(xbuf1, xbuf2);
+
+ free(xbuf2);
+ return ret;
+}
+
+/* Context switching test */
+
+static void *check_xstate(void *info)
+{
+ struct futex_info *finfo = (struct futex_info *)info;
+ struct xsave_buffer *xbuf;
+ int i;
+
+ xbuf = alloc_xbuf();
+ if (!xbuf)
+ ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");
+
+ /*
+ * Load random data into 'xbuf' and then restore it to the xstate
+ * registers.
+ */
+ load_rand_xstate(&xstate, xbuf);
+ finfo->valid = true;
+
+ for (i = 0; i < finfo->iterations; i++) {
+ pthread_mutex_lock(&finfo->mutex);
+
+		/*
+		 * Ensure the register values have not diverged from the
+		 * recorded copy, then load a new random value. Once a
+		 * check has failed, skip all further checks.
+		 */
+ if (finfo->valid) {
+ finfo->valid = validate_xregs_same(xbuf);
+ load_rand_xstate(&xstate, xbuf);
+ }
+
+ /*
+ * The last thread's last unlock will be for thread 0's
+ * mutex. However, thread 0 will have already exited the
+ * loop and the mutex will already be unlocked.
+ *
+ * Because this is not an ERRORCHECK mutex, that
+ * inconsistency will be silently ignored.
+ */
+ pthread_mutex_unlock(&finfo->next->mutex);
+ }
+
+ free(xbuf);
+ return finfo;
+}
+
+static void create_threads(uint32_t num_threads, uint32_t iterations, struct futex_info *finfo)
+{
+ int i;
+
+ for (i = 0; i < num_threads; i++) {
+ int next_nr;
+
+ finfo[i].nr = i;
+ finfo[i].iterations = iterations;
+
+ /*
+ * Thread 'i' will wait on this mutex to be unlocked.
+ * Lock it immediately after initialization:
+ */
+ pthread_mutex_init(&finfo[i].mutex, NULL);
+ pthread_mutex_lock(&finfo[i].mutex);
+
+ next_nr = (i + 1) % num_threads;
+ finfo[i].next = &finfo[next_nr];
+
+ if (pthread_create(&finfo[i].thread, NULL, check_xstate, &finfo[i]))
+ ksft_exit_fail_msg("pthread_create() failed\n");
+ }
+}
+
+static bool checkout_threads(uint32_t num_threads, struct futex_info *finfo)
+{
+ void *thread_retval;
+ bool valid = true;
+ int err, i;
+
+ for (i = 0; i < num_threads; i++) {
+ err = pthread_join(finfo[i].thread, &thread_retval);
+ if (err)
+ ksft_exit_fail_msg("pthread_join() failed for thread %d err: %d\n", i, err);
+
+ if (thread_retval != &finfo[i]) {
+ ksft_exit_fail_msg("unexpected thread retval for thread %d: %p\n",
+ i, thread_retval);
+ }
+
+ valid &= finfo[i].valid;
+ }
+
+ return valid;
+}
+
+static void affinitize_cpu0(void)
+{
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+ ksft_exit_fail_msg("sched_setaffinity to CPU 0 failed\n");
+}
+
+static void test_context_switch(uint32_t num_threads, uint32_t iterations)
+{
+ struct futex_info *finfo;
+
+ /* Affinitize to one CPU to force context switches */
+ affinitize_cpu0();
+
+ printf("[RUN]\t%s: check context switches, %d iterations, %d threads.\n",
+ xstate.name, iterations, num_threads);
+
+ finfo = malloc(sizeof(*finfo) * num_threads);
+ if (!finfo)
+ ksft_exit_fail_msg("unable allocate memory\n");
+
+ create_threads(num_threads, iterations, finfo);
+
+ /*
+ * This thread wakes up thread 0
+ * Thread 0 will wake up 1
+ * Thread 1 will wake up 2
+ * ...
+ * The last thread will wake up 0
+ *
+ * This will repeat for the configured
+ * number of iterations.
+ */
+ pthread_mutex_unlock(&finfo[0].mutex);
+
+ /* Wait for all the threads to finish: */
+ if (checkout_threads(num_threads, finfo))
+ printf("[OK]\tNo incorrect case was found.\n");
+ else
+ printf("[FAIL]\tFailed with context switching test.\n");
+
+ free(finfo);
+}
+
+/*
+ * Ptrace test for the ABI format as described in arch/x86/include/asm/user.h
+ */
+
+/*
+ * Make sure the ptracee has the expanded kernel buffer on the first use.
+ * Then, initialize the state before performing the state injection from
+ * the ptracer. For non-dynamic states, this is benign.
+ */
+static inline void ptracee_touch_xstate(void)
+{
+ struct xsave_buffer *xbuf;
+
+ xbuf = alloc_xbuf();
+
+ load_rand_xstate(&xstate, xbuf);
+ load_init_xstate(&xstate, xbuf);
+
+ free(xbuf);
+}
+
+/*
+ * Ptracer injects the randomized xstate data. It also reads before and
+ * after that, which will execute the kernel's state copy functions.
+ */
+static void ptracer_inject_xstate(pid_t target)
+{
+ uint32_t xbuf_size = get_xbuf_size();
+ struct xsave_buffer *xbuf1, *xbuf2;
+ struct iovec iov;
+
+	/*
+	 * Allocate two buffers: one for the ptrace transfers and one to
+	 * stash a copy of the injected data for later comparison.
+	 */
+ xbuf1 = alloc_xbuf();
+ xbuf2 = alloc_xbuf();
+ if (!xbuf1 || !xbuf2)
+ ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");
+
+ iov.iov_base = xbuf1;
+ iov.iov_len = xbuf_size;
+
+ if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ ksft_exit_fail_msg("PTRACE_GETREGSET failed\n");
+
+ printf("[RUN]\t%s: inject xstate via ptrace().\n", xstate.name);
+
+ load_rand_xstate(&xstate, xbuf1);
+ copy_xstate(xbuf2, xbuf1);
+
+ if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ ksft_exit_fail_msg("PTRACE_SETREGSET failed\n");
+
+ if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
+ ksft_exit_fail_msg("PTRACE_GETREGSET failed\n");
+
+ if (*(uint64_t *)get_fpx_sw_bytes(xbuf1) == xgetbv(0))
+ printf("[OK]\t'xfeatures' in SW reserved area was correctly written\n");
+ else
+ printf("[FAIL]\t'xfeatures' in SW reserved area was not correctly written\n");
+
+ if (validate_xstate_same(xbuf2, xbuf1))
+ printf("[OK]\txstate was correctly updated.\n");
+ else
+ printf("[FAIL]\txstate was not correctly updated.\n");
+
+ free(xbuf1);
+ free(xbuf2);
+}
+
+static void test_ptrace(void)
+{
+ pid_t child;
+ int status;
+
+ child = fork();
+ if (child < 0) {
+ ksft_exit_fail_msg("fork() failed\n");
+ } else if (!child) {
+ if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
+ ksft_exit_fail_msg("PTRACE_TRACEME failed\n");
+
+ ptracee_touch_xstate();
+
+ raise(SIGTRAP);
+ _exit(0);
+ }
+
+ do {
+ wait(&status);
+ } while (WSTOPSIG(status) != SIGTRAP);
+
+ ptracer_inject_xstate(child);
+
+ ptrace(PTRACE_DETACH, child, NULL, NULL);
+ wait(&status);
+ if (!WIFEXITED(status) || WEXITSTATUS(status))
+ ksft_exit_fail_msg("ptracee exit error\n");
+}
+
+/*
+ * Test signal delivery for ABI compatibility.
+ * See the ABI format: arch/x86/include/uapi/asm/sigcontext.h
+ */
+
+/*
+ * Avoid using printf() in signal handlers as it is not
+ * async-signal-safe.
+ */
+#define SIGNAL_BUF_LEN 1000
+static char signal_message_buffer[SIGNAL_BUF_LEN];
+static void sig_print(char *msg)
+{
+ int left = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;
+
+ strncat(signal_message_buffer, msg, left);
+}
+
+static struct xsave_buffer *stashed_xbuf;
+
+static void validate_sigfpstate(int sig, siginfo_t *si, void *ctx_void)
+{
+ ucontext_t *ctx = (ucontext_t *)ctx_void;
+ void *xbuf = ctx->uc_mcontext.fpregs;
+ struct _fpx_sw_bytes *sw_bytes;
+ uint32_t magic2;
+
+ /* Reset the signal message buffer: */
+ signal_message_buffer[0] = '\0';
+
+ sw_bytes = get_fpx_sw_bytes(xbuf);
+ if (sw_bytes->magic1 == FP_XSTATE_MAGIC1)
+ sig_print("[OK]\t'magic1' is valid\n");
+ else
+ sig_print("[FAIL]\t'magic1' is not valid\n");
+
+ if (get_fpx_sw_bytes_features(xbuf) & xstate.mask)
+ sig_print("[OK]\t'xfeatures' in SW reserved area is valid\n");
+ else
+ sig_print("[FAIL]\t'xfeatures' in SW reserved area is not valid\n");
+
+ if (get_xstatebv(xbuf) & xstate.mask)
+ sig_print("[OK]\t'xfeatures' in XSAVE header is valid\n");
+ else
+ sig_print("[FAIL]\t'xfeatures' in XSAVE header is not valid\n");
+
+ if (validate_xstate_same(stashed_xbuf, xbuf))
+ sig_print("[OK]\txstate delivery was successful\n");
+ else
+ sig_print("[FAIL]\txstate delivery was not successful\n");
+
+ magic2 = *(uint32_t *)(xbuf + sw_bytes->xstate_size);
+ if (magic2 == FP_XSTATE_MAGIC2)
+ sig_print("[OK]\t'magic2' is valid\n");
+ else
+ sig_print("[FAIL]\t'magic2' is not valid\n");
+
+ set_rand_data(&xstate, xbuf);
+ copy_xstate(stashed_xbuf, xbuf);
+}
+
+static void test_signal(void)
+{
+ bool valid_xstate;
+
+ /*
+ * The signal handler will access this to verify xstate context
+ * preservation.
+ */
+ stashed_xbuf = alloc_xbuf();
+ if (!stashed_xbuf)
+ ksft_exit_fail_msg("unable to allocate XSAVE buffer\n");
+
+ printf("[RUN]\t%s: load xstate and raise SIGUSR1\n", xstate.name);
+
+ sethandler(SIGUSR1, validate_sigfpstate, 0);
+
+ load_rand_xstate(&xstate, stashed_xbuf);
+
+ raise(SIGUSR1);
+
+	/*
+	 * Record the test result immediately and defer printf(), as
+	 * printf() itself may clobber the xstate registers.
+	 */
+ valid_xstate = validate_xregs_same(stashed_xbuf);
+ printf("%s", signal_message_buffer);
+
+ printf("[RUN]\t%s: load new xstate from sighandler and check it after sigreturn\n",
+ xstate.name);
+
+ if (valid_xstate)
+ printf("[OK]\txstate was restored correctly\n");
+ else
+ printf("[FAIL]\txstate restoration failed\n");
+
+ clearhandler(SIGUSR1);
+ free(stashed_xbuf);
+}
+
+void test_xstate(uint32_t feature_num)
+{
+ const unsigned int ctxtsw_num_threads = 5, ctxtsw_iterations = 10;
+ unsigned long features;
+ long rc;
+
+ if (!(XFEATURE_MASK_TEST_SUPPORTED & (1 << feature_num))) {
+ ksft_print_msg("The xstate test does not fully support the component %u, yet.\n",
+ feature_num);
+ return;
+ }
+
+ rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features);
+ if (rc || !(features & (1 << feature_num))) {
+ ksft_print_msg("The kernel does not support feature number: %u\n", feature_num);
+ return;
+ }
+
+ xstate = get_xstate_info(feature_num);
+ if (!xstate.size || !xstate.xbuf_offset) {
+ ksft_exit_fail_msg("invalid state size/offset (%d/%d)\n",
+ xstate.size, xstate.xbuf_offset);
+ }
+
+ test_context_switch(ctxtsw_num_threads, ctxtsw_iterations);
+ test_ptrace();
+ test_signal();
+}
diff --git a/tools/testing/selftests/x86/xstate.h b/tools/testing/selftests/x86/xstate.h
new file mode 100644
index 000000000000..42af36ec852f
--- /dev/null
+++ b/tools/testing/selftests/x86/xstate.h
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_XSTATE_H
+#define __SELFTESTS_X86_XSTATE_H
+
+#include <stdint.h>
+
+#include "../kselftest.h"
+
+#define XSAVE_HDR_OFFSET 512
+#define XSAVE_HDR_SIZE 64
+
+/*
+ * List of XSAVE features Linux knows about. Copied from
+ * arch/x86/include/asm/fpu/types.h
+ */
+enum xfeature {
+ XFEATURE_FP,
+ XFEATURE_SSE,
+ XFEATURE_YMM,
+ XFEATURE_BNDREGS,
+ XFEATURE_BNDCSR,
+ XFEATURE_OPMASK,
+ XFEATURE_ZMM_Hi256,
+ XFEATURE_Hi16_ZMM,
+ XFEATURE_PT_UNIMPLEMENTED_SO_FAR,
+ XFEATURE_PKRU,
+ XFEATURE_PASID,
+ XFEATURE_CET_USER,
+ XFEATURE_CET_KERNEL_UNUSED,
+ XFEATURE_RSRVD_COMP_13,
+ XFEATURE_RSRVD_COMP_14,
+ XFEATURE_LBR,
+ XFEATURE_RSRVD_COMP_16,
+ XFEATURE_XTILECFG,
+ XFEATURE_XTILEDATA,
+
+ XFEATURE_MAX,
+};
+
+/* Copied from arch/x86/kernel/fpu/xstate.c */
+static const char *xfeature_names[] =
+{
+ "x87 floating point registers",
+ "SSE registers",
+ "AVX registers",
+ "MPX bounds registers",
+ "MPX CSR",
+ "AVX-512 opmask",
+ "AVX-512 Hi256",
+ "AVX-512 ZMM_Hi256",
+ "Processor Trace (unused)",
+ "Protection Keys User registers",
+ "PASID state",
+ "Control-flow User registers",
+ "Control-flow Kernel registers (unused)",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "unknown xstate feature",
+ "AMX Tile config",
+ "AMX Tile data",
+ "unknown xstate feature",
+};
+
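+/*
+ * XSAVE area layout: a 512-byte legacy region, the 64-byte XSAVE
+ * header, then the extended state components.
+ */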
+struct xsave_buffer {
+ union {
+ struct {
+ char legacy[XSAVE_HDR_OFFSET];
+ char header[XSAVE_HDR_SIZE];
+ char extended[0];
+ };
+ char bytes[0];
+ };
+};
+
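+/*
+ * XSAVE and XRSTOR take the requested-feature bitmap (RFBM) in
+ * EDX:EAX, so split the 64-bit mask into two halves.
+ */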
+static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
+{
+ uint32_t rfbm_hi = rfbm >> 32;
+ uint32_t rfbm_lo = rfbm;
+
+ asm volatile("xsave (%%rdi)"
+ : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
+ : "memory");
+}
+
+static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
+{
+ uint32_t rfbm_hi = rfbm >> 32;
+ uint32_t rfbm_lo = rfbm;
+
+ asm volatile("xrstor (%%rdi)"
+ : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
+}
+
+#define CPUID_LEAF_XSTATE 0xd
+#define CPUID_SUBLEAF_XSTATE_USER 0x0
+
+static inline uint32_t get_xbuf_size(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ __cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
+ eax, ebx, ecx, edx);
+
+ /*
+ * EBX enumerates the size (in bytes) required by the XSAVE
+ * instruction for an XSAVE area containing all the user state
+ * components corresponding to bits currently set in XCR0.
+ */
+ return ebx;
+}
+
+struct xstate_info {
+ const char *name;
+ uint32_t num;
+ uint32_t mask;
+ uint32_t xbuf_offset;
+ uint32_t size;
+};
+
+static inline struct xstate_info get_xstate_info(uint32_t xfeature_num)
+{
+ struct xstate_info xstate = { };
+ uint32_t eax, ebx, ecx, edx;
+
+ if (xfeature_num >= XFEATURE_MAX) {
+ ksft_print_msg("unknown state\n");
+ return xstate;
+ }
+
+ xstate.name = xfeature_names[xfeature_num];
+ xstate.num = xfeature_num;
+ xstate.mask = 1 << xfeature_num;
+
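+	/* CPUID(0xD, n): EAX reports the component's size, EBX its offset. */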
+ __cpuid_count(CPUID_LEAF_XSTATE, xfeature_num,
+ eax, ebx, ecx, edx);
+ xstate.size = eax;
+ xstate.xbuf_offset = ebx;
+ return xstate;
+}
+
+static inline struct xsave_buffer *alloc_xbuf(void)
+{
+ uint32_t xbuf_size = get_xbuf_size();
+
+	/* The XSAVE buffer must be 64-byte aligned. */
+ return aligned_alloc(64, xbuf_size);
+}
+
+static inline void clear_xstate_header(struct xsave_buffer *xbuf)
+{
+ memset(&xbuf->header, 0, sizeof(xbuf->header));
+}
+
+static inline void set_xstatebv(struct xsave_buffer *xbuf, uint64_t bv)
+{
+ /* XSTATE_BV is at the beginning of the header: */
+ *(uint64_t *)(&xbuf->header) = bv;
+}
+
+/* See 'struct _fpx_sw_bytes' at sigcontext.h */
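+/*
+ * 464 is the offset of sw_reserved[] within the 512-byte legacy area,
+ * and the xfeatures bitmap sits 8 bytes in, after the 32-bit magic1
+ * and extended_size fields.
+ */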
+#define SW_BYTES_OFFSET 464
+/* N.B. The xfeatures field's name varies across headers, so read it by offset. */
+#define SW_BYTES_BV_OFFSET (SW_BYTES_OFFSET + 8)
+
+static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *xbuf)
+{
+ return xbuf + SW_BYTES_OFFSET;
+}
+
+static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
+{
+ return *(uint64_t *)(buffer + SW_BYTES_BV_OFFSET);
+}
+
+static inline void set_rand_data(struct xstate_info *xstate, struct xsave_buffer *xbuf)
+{
+ int *ptr = (int *)&xbuf->bytes[xstate->xbuf_offset];
+ int data, i;
+
+ /*
+ * Ensure that 'data' is never 0. This ensures that
+ * the registers are never in their initial configuration
+ * and thus never tracked as being in the init state.
+ */
+ data = rand() | 1;
+
+ for (i = 0; i < xstate->size / sizeof(int); i++, ptr++)
+ *ptr = data;
+}
+
+/* Testing kernel's context switching and ABI support for the xstate. */
+void test_xstate(uint32_t feature_num);
+
+#endif /* __SELFTESTS_X86_XSTATE_H */